use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.
the class HFileBlock method sanityCheck.
/**
 * Checks if the block is internally consistent, i.e. the first
 * {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the buffer contain a
 * valid header consistent with the fields. Assumes a packed block structure.
 * This function is primarily for testing and debugging, and is not
 * thread-safe, because it alters the internal buffer pointer.
 * Used by tests only.
 */
@VisibleForTesting
void sanityCheck() throws IOException {
  // Duplicate so no side-effects
  ByteBuff dup = this.buf.duplicate().rewind();
  sanityCheckAssertion(BlockType.read(dup), blockType);
  sanityCheckAssertion(dup.getInt(), onDiskSizeWithoutHeader, "onDiskSizeWithoutHeader");
  sanityCheckAssertion(dup.getInt(), uncompressedSizeWithoutHeader,
      "uncompressedSizeWithoutHeader");
  sanityCheckAssertion(dup.getLong(), prevBlockOffset, "prevBlockOffset");
  if (this.fileContext.isUseHBaseChecksum()) {
    sanityCheckAssertion(dup.get(), this.fileContext.getChecksumType().getCode(), "checksumType");
    sanityCheckAssertion(dup.getInt(), this.fileContext.getBytesPerChecksum(), "bytesPerChecksum");
    sanityCheckAssertion(dup.getInt(), onDiskDataSizeWithHeader, "onDiskDataSizeWithHeader");
  }
  int cksumBytes = totalChecksumBytes();
  int expectedBufLimit = onDiskDataSizeWithHeader + cksumBytes;
  if (dup.limit() != expectedBufLimit) {
    throw new AssertionError("Expected limit " + expectedBufLimit + ", got " + dup.limit());
  }
  // We might optionally allocate HFILEBLOCK_HEADER_SIZE more bytes to read the next
  // block's header, so there are two sensible values for buffer capacity.
  int hdrSize = headerSize();
  if (dup.capacity() != expectedBufLimit && dup.capacity() != expectedBufLimit + hdrSize) {
    throw new AssertionError("Invalid buffer capacity: " + dup.capacity()
        + ", expected " + expectedBufLimit + " or " + (expectedBufLimit + hdrSize));
  }
}
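The sanityCheckAssertion helpers are not shown in this excerpt. Below is a minimal sketch of the two overloads the method relies on, written as private members of HFileBlock and inferred purely from the call sites above; the exact message text and exception choice in the real HBase code may differ.

private void sanityCheckAssertion(long valueFromBuf, long valueFromField, String fieldName)
    throws IOException {
  // Compare a numeric header field read from the buffer against the in-memory field.
  if (valueFromBuf != valueFromField) {
    throw new IOException(fieldName + " in the buffer (" + valueFromBuf
        + ") does not match the field value (" + valueFromField + ")");
  }
}

private void sanityCheckAssertion(BlockType valueFromBuf, BlockType valueFromField)
    throws IOException {
  // The block type magic read from the buffer must match the blockType field.
  if (valueFromBuf != valueFromField) {
    throw new IOException("Block type stored in the buffer: " + valueFromBuf
        + ", block type field: " + valueFromField);
  }
}

The byte and int values read in sanityCheck() widen to long, so the first overload covers every numeric field, while the second handles the 8-byte block type magic.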
use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.
the class CompoundBloomFilter method contains.
@Override
public boolean contains(Cell keyCell, ByteBuff bloom, BloomType type) {
  int block = index.rootBlockContainingKey(keyCell);
  if (block < 0) {
    // This key is not in the file.
    return false;
  }
  boolean result;
  HFileBlock bloomBlock = getBloomBlock(block);
  try {
    ByteBuff bloomBuf = bloomBlock.getBufferReadOnly();
    result = BloomFilterUtil.contains(keyCell, bloomBuf, bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount, type);
  } finally {
    // After use, return the block so it can be released if it was served from a cache.
    reader.returnBlock(bloomBlock);
  }
  if (numPositivesPerChunk != null && result) {
    // Update statistics. Only used in unit tests.
    ++numPositivesPerChunk[block];
  }
  return result;
}
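For context, this is roughly how a Bloom membership test over a buffer-backed bit array works. The sketch below is a simplified stand-in, not BloomFilterUtil.contains: it uses java.nio.ByteBuffer instead of ByteBuff, a made-up double-hashing scheme, and assumed parameter names. The relevant point is that the Bloom bytes start at an offset into the block (headerSize() above) and span getUncompressedSizeWithoutHeader() bytes.

import java.nio.ByteBuffer;

public final class SimpleBloomCheck {
  /**
   * @param key           key bytes to probe for
   * @param bloom         buffer holding the Bloom chunk
   * @param bloomOffset   offset of the first Bloom byte (plays the role of headerSize())
   * @param bloomByteSize number of Bloom bytes (plays the role of the uncompressed size)
   * @param hashCount     number of hash probes
   */
  public static boolean contains(byte[] key, ByteBuffer bloom, int bloomOffset,
      int bloomByteSize, int hashCount) {
    long bloomBitSize = (long) bloomByteSize * 8L;
    // Double hashing: the i-th probe is h1 + i * h2 modulo the bit size.
    long h1 = fnv1a(key, 0x811C9DC5);
    long h2 = fnv1a(key, 0x01000193) | 1L; // force odd so probes spread over the bit space
    for (int i = 0; i < hashCount; i++) {
      long bitPos = Math.floorMod(h1 + i * h2, bloomBitSize);
      int bytePos = (int) (bitPos >>> 3);
      int bitInByte = (int) (bitPos & 7);
      if ((bloom.get(bloomOffset + bytePos) & (1 << bitInByte)) == 0) {
        return false; // one unset bit proves the key was never added
      }
    }
    return true; // all probed bits set: key is possibly present (false positives allowed)
  }

  // Stand-in 32-bit FNV-1a style hash, seeded so two different base hashes can be derived.
  private static long fnv1a(byte[] data, int seed) {
    long hash = seed & 0xFFFFFFFFL;
    for (byte b : data) {
      hash ^= (b & 0xFF);
      hash *= 0x01000193L;
    }
    return hash & 0xFFFFFFFFL;
  }
}

Because a single unset bit rules the key out, contains() above can answer false without ever touching the data block, which is the whole point of consulting the Bloom chunk first.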
use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.
the class HFileBlock method toString.
@Override
public String toString() {
  StringBuilder sb = new StringBuilder()
      .append("[")
      .append("blockType=").append(blockType)
      .append(", fileOffset=").append(offset)
      .append(", headerSize=").append(headerSize())
      .append(", onDiskSizeWithoutHeader=").append(onDiskSizeWithoutHeader)
      .append(", uncompressedSizeWithoutHeader=").append(uncompressedSizeWithoutHeader)
      .append(", prevBlockOffset=").append(prevBlockOffset)
      .append(", isUseHBaseChecksum=").append(fileContext.isUseHBaseChecksum());
  if (fileContext.isUseHBaseChecksum()) {
    sb.append(", checksumType=").append(ChecksumType.codeToType(this.buf.get(24)))
        .append(", bytesPerChecksum=").append(this.buf.getInt(24 + 1))
        .append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader);
  } else {
    sb.append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader)
        .append("(").append(onDiskSizeWithoutHeader)
        .append("+").append(HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM).append(")");
  }
  String dataBegin = null;
  if (buf.hasArray()) {
    dataBegin = Bytes.toStringBinary(buf.array(), buf.arrayOffset() + headerSize(),
        Math.min(32, buf.limit() - buf.arrayOffset() - headerSize()));
  } else {
    ByteBuff bufWithoutHeader = getBufferWithoutHeader();
    byte[] dataBeginBytes =
        new byte[Math.min(32, bufWithoutHeader.limit() - bufWithoutHeader.position())];
    bufWithoutHeader.get(dataBeginBytes);
    dataBegin = Bytes.toStringBinary(dataBeginBytes);
  }
  sb.append(", getOnDiskSizeWithHeader=").append(getOnDiskSizeWithHeader())
      .append(", totalChecksumBytes=").append(totalChecksumBytes())
      .append(", isUnpacked=").append(isUnpacked())
      .append(", buf=[").append(buf).append("]")
      .append(", dataBeginsWith=").append(dataBegin)
      .append(", fileContext=").append(fileContext)
      .append("]");
  return sb.toString();
}
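For reference, the absolute offsets 24 and 24 + 1 used above follow from the read order in sanityCheck(): an 8-byte block type magic, two ints, a long, then (with HBase checksums enabled) a byte and two more ints. Assuming the standard 8-byte magic, the checksum-enabled header layout is:

offset 0:  block type magic (8 bytes)
offset 8:  onDiskSizeWithoutHeader (int)
offset 12: uncompressedSizeWithoutHeader (int)
offset 16: prevBlockOffset (long)
offset 24: checksumType (byte)
offset 25: bytesPerChecksum (int)
offset 29: onDiskDataSizeWithHeader (int)

for a 33-byte header (HConstants.HFILEBLOCK_HEADER_SIZE), versus the 24-byte HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM used in the else branch.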
use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.
the class TestByteBufferArray method testAsSubBufferWhenEndOffsetLandInLastBuffer.
@Test
public void testAsSubBufferWhenEndOffsetLandInLastBuffer() throws Exception {
  int capacity = 4 * 1024 * 1024;
  ByteBufferAllocator allocator = new ByteBufferAllocator() {
    @Override
    public ByteBuffer allocate(long size, boolean directByteBuffer) throws IOException {
      if (directByteBuffer) {
        return ByteBuffer.allocateDirect((int) size);
      } else {
        return ByteBuffer.allocate((int) size);
      }
    }
  };
  ByteBufferArray array = new ByteBufferArray(capacity, false, allocator);
  ByteBuff subBuf = array.asSubByteBuff(0, capacity);
  // Position to the last byte
  subBuf.position(capacity - 1);
  assertTrue(subBuf.hasRemaining());
  // Read last byte
  subBuf.get();
  assertFalse(subBuf.hasRemaining());
}
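The boundary this test exercises is the mapping from a global offset into a (backing buffer index, offset within that buffer) pair. A minimal sketch of that arithmetic, assuming ByteBufferArray splits its capacity into equal fixed-size backing buffers (bufferSize and the helper names are assumptions, not the class's real internals):

final class ByteBufferArrayOffsets {
  // Which backing ByteBuffer a global offset falls into.
  static int bufferIndex(long globalOffset, int bufferSize) {
    return (int) (globalOffset / bufferSize);
  }

  // Position inside that backing buffer.
  static int offsetInBuffer(long globalOffset, int bufferSize) {
    return (int) (globalOffset % bufferSize);
  }
}

With globalOffset = capacity - 1, the index resolves to the last backing buffer, which is the case the test name describes: the sub-ByteBuff must still report one remaining byte there and none once that byte has been read.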
use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.
the class StoreFileReader method checkGeneralBloomFilter.
private boolean checkGeneralBloomFilter(byte[] key, Cell kvKey, BloomFilter bloomFilter) {
  // Empty file
  if (reader.getTrailer().getEntryCount() == 0) {
    return false;
  }
  HFileBlock bloomBlock = null;
  try {
    boolean shouldCheckBloom;
    ByteBuff bloom;
    if (bloomFilter.supportsAutoLoading()) {
      bloom = null;
      shouldCheckBloom = true;
    } else {
      bloomBlock = reader.getMetaBlock(HFile.BLOOM_FILTER_DATA_KEY, true);
      bloom = bloomBlock.getBufferWithoutHeader();
      shouldCheckBloom = bloom != null;
    }
    if (shouldCheckBloom) {
      boolean exists;
      // Whether the primary Bloom key is greater than the last Bloom key
      // from the file info. For row-column Bloom filters this is not yet
      // a sufficient condition to return false.
      boolean keyIsAfterLast = (lastBloomKey != null);
      // hbase:meta does not have Bloom filters, so no special interpretation of its cells
      // is needed; we can safely use Bytes.BYTES_RAWCOMPARATOR for ROW Blooms.
      if (keyIsAfterLast) {
        if (bloomFilterType == BloomType.ROW) {
          keyIsAfterLast = (Bytes.BYTES_RAWCOMPARATOR.compare(key, lastBloomKey) > 0);
        } else {
          keyIsAfterLast = (CellComparator.COMPARATOR.compare(kvKey, lastBloomKeyOnlyKV)) > 0;
        }
      }
      if (bloomFilterType == BloomType.ROWCOL) {
        // Since a Row Delete is essentially a DeleteFamily applied to all
        // columns, a file might be skipped if using only the row+col Bloom filter.
        // To ensure this file is included, an additional check using only a
        // row Bloom key is required.
        Cell rowBloomKey = CellUtil.createFirstOnRow(kvKey);
        // As above, this comparison is safe because hbase:meta does not have Bloom filters.
        if (keyIsAfterLast
            && (CellComparator.COMPARATOR.compare(rowBloomKey, lastBloomKeyOnlyKV)) > 0) {
          exists = false;
        } else {
          exists = bloomFilter.contains(kvKey, bloom, BloomType.ROWCOL)
              || bloomFilter.contains(rowBloomKey, bloom, BloomType.ROWCOL);
        }
      } else {
        exists = !keyIsAfterLast && bloomFilter.contains(key, 0, key.length, bloom);
      }
      return exists;
    }
  } catch (IOException e) {
    LOG.error("Error reading bloom filter data -- proceeding without", e);
    setGeneralBloomFilterFaulty();
  } catch (IllegalArgumentException e) {
    LOG.error("Bad bloom filter data -- proceeding without", e);
    setGeneralBloomFilterFaulty();
  } finally {
    // Return the bloom block so that its ref count can be decremented.
    reader.returnBlock(bloomBlock);
  }
  return true;
}
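Taken together with the CompoundBloomFilter snippet above, the calls in this method imply the following shape for the BloomFilter API, using the Cell, ByteBuff and BloomType types from the snippets above. This is only an inference from the call sites; the real org.apache.hadoop.hbase.util.BloomFilter interface may declare additional methods, and the sketch name is intentionally different.

// Sketch of the BloomFilter API as exercised above; only the three signatures actually
// called in these snippets are shown.
interface BloomFilterSketch {
  // True if the filter loads its own data lazily, so no meta block has to be passed in.
  boolean supportsAutoLoading();

  // Cell-based check used above for ROWCOL lookups; 'bloom' may be null when auto-loading.
  boolean contains(Cell keyCell, ByteBuff bloom, BloomType type);

  // Raw-key check used above for plain ROW lookups on a byte[] row key.
  boolean contains(byte[] key, int keyOffset, int keyLength, ByteBuff bloom);
}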