
Example 1 with ByteBuff

Use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.

From class HFileBlock, method sanityCheck().

/**
   * Checks if the block is internally consistent, i.e. the first
   * {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the buffer contain a
   * valid header consistent with the fields. Assumes a packed block structure.
   * This function is primarily for testing and debugging, and is not
   * thread-safe, because it alters the internal buffer pointer.
   * Used by tests only.
   */
@VisibleForTesting
void sanityCheck() throws IOException {
    // Duplicate so no side-effects
    ByteBuff dup = this.buf.duplicate().rewind();
    sanityCheckAssertion(BlockType.read(dup), blockType);
    sanityCheckAssertion(dup.getInt(), onDiskSizeWithoutHeader, "onDiskSizeWithoutHeader");
    sanityCheckAssertion(dup.getInt(), uncompressedSizeWithoutHeader, "uncompressedSizeWithoutHeader");
    sanityCheckAssertion(dup.getLong(), prevBlockOffset, "prevBlockOffset");
    if (this.fileContext.isUseHBaseChecksum()) {
        sanityCheckAssertion(dup.get(), this.fileContext.getChecksumType().getCode(), "checksumType");
        sanityCheckAssertion(dup.getInt(), this.fileContext.getBytesPerChecksum(), "bytesPerChecksum");
        sanityCheckAssertion(dup.getInt(), onDiskDataSizeWithHeader, "onDiskDataSizeWithHeader");
    }
    int cksumBytes = totalChecksumBytes();
    int expectedBufLimit = onDiskDataSizeWithHeader + cksumBytes;
    if (dup.limit() != expectedBufLimit) {
        throw new AssertionError("Expected limit " + expectedBufLimit + ", got " + dup.limit());
    }
    // We might optionally allocate HFILEBLOCK_HEADER_SIZE more bytes to read the next
    // block's header, so there are two sensible values for buffer capacity.
    int hdrSize = headerSize();
    if (dup.capacity() != expectedBufLimit && dup.capacity() != expectedBufLimit + hdrSize) {
        throw new AssertionError("Invalid buffer capacity: " + dup.capacity() + ", expected " + expectedBufLimit + " or " + (expectedBufLimit + hdrSize));
    }
}
Also used: MultiByteBuff (org.apache.hadoop.hbase.nio.MultiByteBuff), SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff), ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
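
The duplicate()/rewind() pair at the top of sanityCheck() is what makes the check side-effect free: ByteBuff deliberately mirrors the java.nio.ByteBuffer API, so the same idiom can be shown with a plain ByteBuffer. The sketch below uses a made-up three-int header layout purely for illustration; it is not the real HFile block header.

import java.nio.ByteBuffer;

public class HeaderSanityCheckSketch {

    // Hypothetical header layout for illustration: magic (int), onDiskSize (int), payload (int)
    static void sanityCheck(ByteBuffer buf, int expectedMagic, int expectedOnDiskSize) {
        // duplicate() shares content but not position/limit; rewind() resets the
        // copy's position, so the caller's buffer is never disturbed
        ByteBuffer dup = (ByteBuffer) buf.duplicate().rewind();
        int magic = dup.getInt();
        if (magic != expectedMagic) {
            throw new AssertionError("Expected magic " + expectedMagic + ", got " + magic);
        }
        int onDiskSize = dup.getInt();
        if (onDiskSize != expectedOnDiskSize) {
            throw new AssertionError("Expected size " + expectedOnDiskSize + ", got " + onDiskSize);
        }
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(12);
        buf.putInt(0xCAFE).putInt(128).putInt(512);
        sanityCheck(buf, 0xCAFE, 128);
        System.out.println("position after check: " + buf.position()); // still 12
    }
}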

Example 2 with ByteBuff

Use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.

From class CompoundBloomFilter, method contains().

@Override
public boolean contains(Cell keyCell, ByteBuff bloom, BloomType type) {
    int block = index.rootBlockContainingKey(keyCell);
    if (block < 0) {
        // This key is not in the file.
        return false;
    }
    boolean result;
    HFileBlock bloomBlock = getBloomBlock(block);
    try {
        ByteBuff bloomBuf = bloomBlock.getBufferReadOnly();
        result = BloomFilterUtil.contains(keyCell, bloomBuf, bloomBlock.headerSize(), bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount, type);
    } finally {
        // After the use return back the block if it was served from a cache.
        reader.returnBlock(bloomBlock);
    }
    if (numPositivesPerChunk != null && result) {
        // Update statistics. Only used in unit tests.
        ++numPositivesPerChunk[block];
    }
    return result;
}
Also used: ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff)
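
The shape of the try/finally here matters more than the Bloom math: a block served from the block cache is reference-counted, so getBloomBlock must be paired with returnBlock even when the probe throws. A minimal sketch of that discipline, using a hypothetical ref-counted Block class rather than the real reader/cache API:

import java.util.concurrent.atomic.AtomicInteger;

public class ReturnBlockSketch {

    // Hypothetical ref-counted block standing in for a cache-served HFileBlock
    static class Block {
        final AtomicInteger refCount = new AtomicInteger(1);
        void release() { refCount.decrementAndGet(); }
    }

    static Block fetchBlock() { return new Block(); }

    static boolean probe(Block block) {
        // ... read from the block's buffer here; may throw ...
        return true;
    }

    public static void main(String[] args) {
        Block block = fetchBlock();
        boolean result;
        try {
            result = probe(block);
        } finally {
            // Always return the block, even if probe() throws, so the cache
            // can evict the backing memory once the ref count drops
            block.release();
        }
        System.out.println("result=" + result + ", refCount=" + block.refCount.get());
    }
}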

Example 3 with ByteBuff

Use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.

From class HFileBlock, method toString().

@Override
public String toString() {
    StringBuilder sb = new StringBuilder()
        .append("[")
        .append("blockType=").append(blockType)
        .append(", fileOffset=").append(offset)
        .append(", headerSize=").append(headerSize())
        .append(", onDiskSizeWithoutHeader=").append(onDiskSizeWithoutHeader)
        .append(", uncompressedSizeWithoutHeader=").append(uncompressedSizeWithoutHeader)
        .append(", prevBlockOffset=").append(prevBlockOffset)
        .append(", isUseHBaseChecksum=").append(fileContext.isUseHBaseChecksum());
    if (fileContext.isUseHBaseChecksum()) {
        sb.append(", checksumType=").append(ChecksumType.codeToType(this.buf.get(24)))
          .append(", bytesPerChecksum=").append(this.buf.getInt(24 + 1))
          .append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader);
    } else {
        sb.append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader)
          .append("(").append(onDiskSizeWithoutHeader)
          .append("+").append(HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM).append(")");
    }
    String dataBegin = null;
    if (buf.hasArray()) {
        dataBegin = Bytes.toStringBinary(buf.array(), buf.arrayOffset() + headerSize(), Math.min(32, buf.limit() - buf.arrayOffset() - headerSize()));
    } else {
        ByteBuff bufWithoutHeader = getBufferWithoutHeader();
        byte[] dataBeginBytes = new byte[Math.min(32, bufWithoutHeader.limit() - bufWithoutHeader.position())];
        bufWithoutHeader.get(dataBeginBytes);
        dataBegin = Bytes.toStringBinary(dataBeginBytes);
    }
    sb.append(", getOnDiskSizeWithHeader=").append(getOnDiskSizeWithHeader()).append(", totalChecksumBytes=").append(totalChecksumBytes()).append(", isUnpacked=").append(isUnpacked()).append(", buf=[").append(buf).append("]").append(", dataBeginsWith=").append(dataBegin).append(", fileContext=").append(fileContext).append("]");
    return sb.toString();
}
Also used: MultiByteBuff (org.apache.hadoop.hbase.nio.MultiByteBuff), SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff), ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff)
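
The hasArray() branch is a standard pattern when a buffer may be off-heap: heap buffers expose their backing array for direct indexing, while direct buffers require a positioned copy. Below is a plain-ByteBuffer version of the same peek-at-the-first-bytes logic; the headerSize parameter and 32-byte cap are carried over from the example, while the class and method names are made up.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class DataBeginSketch {

    // Peek at up to the first 32 bytes after a header, without assuming
    // the buffer is heap-backed
    static byte[] peekAfterHeader(ByteBuffer buf, int headerSize) {
        int len = Math.min(32, buf.limit() - headerSize);
        byte[] out = new byte[len];
        if (buf.hasArray()) {
            // Heap buffer: read straight out of the backing array
            System.arraycopy(buf.array(), buf.arrayOffset() + headerSize, out, 0, len);
        } else {
            // Direct buffer: no backing array, so copy via a positioned duplicate
            ByteBuffer dup = buf.duplicate();
            dup.position(headerSize);
            dup.get(out);
        }
        return out;
    }

    public static void main(String[] args) {
        byte[] payload = "HDRDATA-BEGINS-HERE".getBytes(StandardCharsets.UTF_8);
        for (ByteBuffer buf : new ByteBuffer[] {
                ByteBuffer.allocate(payload.length), ByteBuffer.allocateDirect(payload.length)}) {
            buf.put(payload).flip();
            // "HDR" stands in for the 3-byte header in this toy layout
            System.out.println(new String(peekAfterHeader(buf, 3), StandardCharsets.UTF_8));
        }
    }
}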

Example 4 with ByteBuff

Use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.

From class TestByteBufferArray, method testAsSubBufferWhenEndOffsetLandInLastBuffer().

@Test
public void testAsSubBufferWhenEndOffsetLandInLastBuffer() throws Exception {
    int capacity = 4 * 1024 * 1024;
    ByteBufferAllocator allocator = new ByteBufferAllocator() {

        @Override
        public ByteBuffer allocate(long size, boolean directByteBuffer) throws IOException {
            if (directByteBuffer) {
                return ByteBuffer.allocateDirect((int) size);
            } else {
                return ByteBuffer.allocate((int) size);
            }
        }
    };
    ByteBufferArray array = new ByteBufferArray(capacity, false, allocator);
    ByteBuff subBuf = array.asSubByteBuff(0, capacity);
    // Position to the last byte
    subBuf.position(capacity - 1);
    assertTrue(subBuf.hasRemaining());
    // Read last byte
    subBuf.get();
    assertFalse(subBuf.hasRemaining());
}
Also used: ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff), Test (org.junit.Test)
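
What the test pins down is the boundary case: an end offset that lands in the last of several backing buffers. The toy model below reproduces only the index arithmetic, with a segment size chosen for illustration; ByteBufferArray's real segmentation and asSubByteBuff handling are more involved.

import java.nio.ByteBuffer;

public class SubBufferBoundarySketch {

    public static void main(String[] args) {
        // Simplified model: a 4 MiB logical space backed by four 1 MiB buffers
        // (segment size assumed for illustration)
        int segment = 1024 * 1024;
        ByteBuffer[] segments = new ByteBuffer[4];
        for (int i = 0; i < segments.length; i++) {
            segments[i] = ByteBuffer.allocate(segment);
        }
        int capacity = segment * segments.length;

        // Read the byte at a global offset by mapping it to (segment, local offset);
        // the interesting case is the very last byte, which lands in the last buffer
        int offset = capacity - 1;
        byte last = segments[offset / segment].get(offset % segment);
        System.out.println("last byte = " + last + " from segment " + (offset / segment));
    }
}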

Example 5 with ByteBuff

Use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.

From class StoreFileReader, method checkGeneralBloomFilter().

private boolean checkGeneralBloomFilter(byte[] key, Cell kvKey, BloomFilter bloomFilter) {
    // Empty file
    if (reader.getTrailer().getEntryCount() == 0) {
        return false;
    }
    HFileBlock bloomBlock = null;
    try {
        boolean shouldCheckBloom;
        ByteBuff bloom;
        if (bloomFilter.supportsAutoLoading()) {
            bloom = null;
            shouldCheckBloom = true;
        } else {
            bloomBlock = reader.getMetaBlock(HFile.BLOOM_FILTER_DATA_KEY, true);
            bloom = bloomBlock.getBufferWithoutHeader();
            shouldCheckBloom = bloom != null;
        }
        if (shouldCheckBloom) {
            boolean exists;
            // Whether the primary Bloom key is greater than the last Bloom key
            // from the file info. For row-column Bloom filters this is not yet
            // a sufficient condition to return false.
            boolean keyIsAfterLast = (lastBloomKey != null);
            // hbase:meta does not have blooms. So we need not have special interpretation
            // of the hbase:meta cells. We can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Bloom
            if (keyIsAfterLast) {
                if (bloomFilterType == BloomType.ROW) {
                    keyIsAfterLast = (Bytes.BYTES_RAWCOMPARATOR.compare(key, lastBloomKey) > 0);
                } else {
                    keyIsAfterLast = (CellComparator.COMPARATOR.compare(kvKey, lastBloomKeyOnlyKV)) > 0;
                }
            }
            if (bloomFilterType == BloomType.ROWCOL) {
                // Since a Row Delete is essentially a DeleteFamily applied to all
                // columns, a file might be skipped if using row+col Bloom filter.
                // In order to ensure this file is included an additional check is
                // required looking only for a row bloom.
                Cell rowBloomKey = CellUtil.createFirstOnRow(kvKey);
                // hbase:meta does not have blooms. So we need not have special interpretation
                // of the hbase:meta cells. We can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Bloom
                if (keyIsAfterLast && (CellComparator.COMPARATOR.compare(rowBloomKey, lastBloomKeyOnlyKV)) > 0) {
                    exists = false;
                } else {
                    exists = bloomFilter.contains(kvKey, bloom, BloomType.ROWCOL) || bloomFilter.contains(rowBloomKey, bloom, BloomType.ROWCOL);
                }
            } else {
                exists = !keyIsAfterLast && bloomFilter.contains(key, 0, key.length, bloom);
            }
            return exists;
        }
    } catch (IOException e) {
        LOG.error("Error reading bloom filter data -- proceeding without", e);
        setGeneralBloomFilterFaulty();
    } catch (IllegalArgumentException e) {
        LOG.error("Bad bloom filter data -- proceeding without", e);
        setGeneralBloomFilterFaulty();
    } finally {
        // Return the bloom block so that its ref count can be decremented.
        reader.returnBlock(bloomBlock);
    }
    return true;
}
Also used: HFileBlock (org.apache.hadoop.hbase.io.hfile.HFileBlock), ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff), IOException (java.io.IOException), Cell (org.apache.hadoop.hbase.Cell)
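
The example delegates the actual bit probing to BloomFilterUtil.contains. For background, this is the generic shape of a Bloom membership test: a key is reported present only if all of its hashCount probe bits are set. The hash function below is a toy stand-in, not HBase's hashing.

import java.nio.charset.StandardCharsets;

public class BloomContainsSketch {

    // Generic Bloom membership test: "possibly present" only if every probe bit is set
    static boolean contains(byte[] key, byte[] bloom, int hashCount) {
        int bitCount = bloom.length * 8;
        for (int i = 0; i < hashCount; i++) {
            int bit = Math.floorMod(hash(key, i), bitCount);
            if ((bloom[bit >> 3] & (1 << (bit & 7))) == 0) {
                return false; // definitely absent
            }
        }
        return true; // possibly present (false positives allowed)
    }

    static void add(byte[] key, byte[] bloom, int hashCount) {
        int bitCount = bloom.length * 8;
        for (int i = 0; i < hashCount; i++) {
            int bit = Math.floorMod(hash(key, i), bitCount);
            bloom[bit >> 3] |= (1 << (bit & 7));
        }
    }

    // Simple seeded hash, for illustration only
    static int hash(byte[] key, int seed) {
        int h = 31 + seed * 0x9E3779B9;
        for (byte b : key) {
            h = h * 31 + b;
        }
        return h;
    }

    public static void main(String[] args) {
        byte[] bloom = new byte[128];
        byte[] row = "row-0001".getBytes(StandardCharsets.UTF_8);
        add(row, bloom, 3);
        System.out.println(contains(row, bloom, 3)); // true
        System.out.println(contains("row-9999".getBytes(StandardCharsets.UTF_8), bloom, 3)); // almost certainly false
    }
}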

Aggregations

ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff): 23
MultiByteBuff (org.apache.hadoop.hbase.nio.MultiByteBuff): 9
ByteBuffer (java.nio.ByteBuffer): 8
SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff): 8
Test (org.junit.Test): 8
ArrayList (java.util.ArrayList): 7
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 5
Path (org.apache.hadoop.fs.Path): 5
DataInputStream (java.io.DataInputStream): 3
Random (java.util.Random): 3
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 3
Compression (org.apache.hadoop.hbase.io.compress.Compression): 3
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 2
ByteArrayInputStream (java.io.ByteArrayInputStream): 2
DataOutputStream (java.io.DataOutputStream): 2
File (java.io.File): 2
IOException (java.io.IOException): 2
Cell (org.apache.hadoop.hbase.Cell): 2
KeyValue (org.apache.hadoop.hbase.KeyValue): 2
FSDataInputStreamWrapper (org.apache.hadoop.hbase.io.FSDataInputStreamWrapper): 2