Use of org.apache.hadoop.hbase.io.encoding.DataBlockEncoding in project hbase by apache.
The class HFileReaderImpl, method getCachedBlock.
/**
* Retrieve block from cache. Validates the retrieved block's type vs {@code expectedBlockType}
* and its encoding vs. {@code expectedDataBlockEncoding}. Unpacks the block as necessary.
*/
private HFileBlock getCachedBlock(BlockCacheKey cacheKey, boolean cacheBlock, boolean useLock,
    boolean isCompaction, boolean updateCacheMetrics, BlockType expectedBlockType,
    DataBlockEncoding expectedDataBlockEncoding) throws IOException {
  // Check cache for block. If found, return it.
  BlockCache cache = cacheConf.getBlockCache().orElse(null);
  if (cache != null) {
    HFileBlock cachedBlock = (HFileBlock) cache.getBlock(cacheKey, cacheBlock, useLock,
      updateCacheMetrics, expectedBlockType);
    if (cachedBlock != null) {
      if (cacheConf.shouldCacheCompressed(cachedBlock.getBlockType().getCategory())) {
        HFileBlock compressedBlock = cachedBlock;
        cachedBlock = compressedBlock.unpack(hfileContext, fsBlockReader);
        // If unpacking produced a new block, the compressed original can be released.
        if (compressedBlock != cachedBlock) {
          compressedBlock.release();
        }
      }
      try {
        validateBlockType(cachedBlock, expectedBlockType);
      } catch (IOException e) {
        returnAndEvictBlock(cache, cacheKey, cachedBlock);
        throw e;
      }
      if (expectedDataBlockEncoding == null) {
        return cachedBlock;
      }
      DataBlockEncoding actualDataBlockEncoding = cachedBlock.getDataBlockEncoding();
      // Block types other than data blocks always report DataBlockEncoding.NONE, so only
      // perform this check if the cached block is a data block.
      if (cachedBlock.getBlockType().isData()
        && !actualDataBlockEncoding.equals(expectedDataBlockEncoding)) {
        // A scanner expecting one encoding must not be handed a block cached with another,
        // so on a mismatch the cached block cannot be returned to the caller.
        if (!expectedDataBlockEncoding.equals(DataBlockEncoding.NONE)
          && !actualDataBlockEncoding.equals(DataBlockEncoding.NONE)) {
          // If the block is encoded but the encoding does not match the
          // expected encoding, it is likely the encoding was changed but the
          // block was not yet evicted. Evictions on file close happen
          // asynchronously, so blocks with the old encoding still linger in
          // the cache for some period of time. This event should be rare, as
          // it only happens on schema definition change.
          LOG.info("Evicting cached block with key {} because of a data block encoding mismatch; "
            + "expected {}, actual {}, path={}", cacheKey, expectedDataBlockEncoding,
            actualDataBlockEncoding, path);
          // This is an error scenario, so the block needs to be released here.
          returnAndEvictBlock(cache, cacheKey, cachedBlock);
        }
        return null;
      }
      return cachedBlock;
    }
  }
  return null;
}
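To make the mismatch rule concrete, here is a minimal, self-contained sketch of the eviction condition above. The EncodingMismatchRule class and shouldEvict helper are illustrative names, not HBase API; they only mirror the logic of getCachedBlock.

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

public class EncodingMismatchRule {

  // Mirrors the condition in getCachedBlock: a cached data block is evicted
  // only when the encodings differ and neither side is NONE; in every
  // mismatch case the block is not returned to the caller.
  static boolean shouldEvict(DataBlockEncoding expected, DataBlockEncoding actual) {
    return !expected.equals(actual)
      && !expected.equals(DataBlockEncoding.NONE)
      && !actual.equals(DataBlockEncoding.NONE);
  }

  public static void main(String[] args) {
    System.out.println(shouldEvict(DataBlockEncoding.PREFIX, DataBlockEncoding.FAST_DIFF));    // true
    System.out.println(shouldEvict(DataBlockEncoding.NONE, DataBlockEncoding.FAST_DIFF));      // false
    System.out.println(shouldEvict(DataBlockEncoding.FAST_DIFF, DataBlockEncoding.FAST_DIFF)); // false
  }
}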
Use of org.apache.hadoop.hbase.io.encoding.DataBlockEncoding in project hbase by apache.
The class LruBlockCache, method getEncodingCountsForTest.
public Map<DataBlockEncoding, Integer> getEncodingCountsForTest() {
  Map<DataBlockEncoding, Integer> counts = new EnumMap<>(DataBlockEncoding.class);
  for (LruCachedBlock block : map.values()) {
    DataBlockEncoding encoding = ((HFileBlock) block.getBuffer()).getDataBlockEncoding();
    Integer count = counts.get(encoding);
    counts.put(encoding, (count == null ? 0 : count) + 1);
  }
  return counts;
}
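The same tally can be exercised outside the cache. The EncodingCountDemo class below is a hypothetical stand-in: the list of encodings replaces iterating map.values(), and merge() is used for brevity in place of the get/put pair above.

import java.util.Arrays;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

public class EncodingCountDemo {
  public static void main(String[] args) {
    // Stand-in for the encodings of the blocks held in the cache.
    List<DataBlockEncoding> blockEncodings = Arrays.asList(
      DataBlockEncoding.FAST_DIFF, DataBlockEncoding.FAST_DIFF, DataBlockEncoding.NONE);
    // Same counting idiom as getEncodingCountsForTest.
    Map<DataBlockEncoding, Integer> counts = new EnumMap<>(DataBlockEncoding.class);
    for (DataBlockEncoding encoding : blockEncodings) {
      counts.merge(encoding, 1, Integer::sum);
    }
    System.out.println(counts); // {NONE=1, FAST_DIFF=2}
  }
}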
Use of org.apache.hadoop.hbase.io.encoding.DataBlockEncoding in project hbase by apache.
The class TestHStoreFile, method testDataBlockEncodingMetaData.
/**
* Check if data block encoding information is saved correctly in HFile's file info.
*/
@Test
public void testDataBlockEncodingMetaData() throws IOException {
  // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
  Path dir = new Path(new Path(testDir, "7e0102"), "familyname");
  Path path = new Path(dir, "1234567890");
  DataBlockEncoding dataBlockEncoderAlgo = DataBlockEncoding.FAST_DIFF;
  cacheConf = new CacheConfig(conf);
  HFileContext meta = new HFileContextBuilder()
    .withBlockSize(BLOCKSIZE_SMALL)
    .withChecksumType(CKTYPE)
    .withBytesPerCheckSum(CKBYTES)
    .withDataBlockEncoding(dataBlockEncoderAlgo)
    .build();
  // Make an empty store file; the encoding is recorded in the file info even
  // though no cells are written.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
    .withFilePath(path)
    .withMaxKeyCount(2000)
    .withFileContext(meta)
    .build();
  writer.close();
  HStoreFile storeFile =
    new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true);
  storeFile.initReader();
  StoreFileReader reader = storeFile.getReader();
  Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
  byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
  assertArrayEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
}
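A natural follow-up assertion, sketched here as a hypothetical continuation of the test (not part of the original), is that the stored bytes round-trip back to the enum constant via the standard enum valueOf lookup:

// Hypothetical continuation: value holds the bytes read from file info.
DataBlockEncoding roundTripped = DataBlockEncoding.valueOf(Bytes.toString(value));
assertEquals(DataBlockEncoding.FAST_DIFF, roundTripped);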
Use of org.apache.hadoop.hbase.io.encoding.DataBlockEncoding in project hbase by apache.
The class TestMajorCompaction, method majorCompactionWithDataBlockEncoding.
public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly) throws Exception {
  Map<HStore, HFileDataBlockEncoder> replaceBlockCache = new HashMap<>();
  for (HStore store : r.getStores()) {
    HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
    replaceBlockCache.put(store, blockEncoder);
    final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
    final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE : inCache;
    store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(onDisk));
  }
  majorCompaction();
  // Restore the original encoders.
  for (Entry<HStore, HFileDataBlockEncoder> entry : replaceBlockCache.entrySet()) {
    entry.getKey().setDataBlockEncoderInTest(entry.getValue());
  }
}
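This helper is driven from @Test methods along the following lines; the sketch below shows the two call sites one would expect for the inCacheOnly flag, and the method names are illustrative and may differ between HBase versions.

@Test
public void testDataBlockEncodingInCacheOnly() throws Exception {
  majorCompactionWithDataBlockEncoding(true);
}

@Test
public void testDataBlockEncodingEverywhere() throws Exception {
  majorCompactionWithDataBlockEncoding(false);
}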
Use of org.apache.hadoop.hbase.io.encoding.DataBlockEncoding in project hbase by apache.
The class TestHFileOutputFormat2, method setupMockColumnFamiliesForDataBlockEncoding.
private void setupMockColumnFamiliesForDataBlockEncoding(Table table,
    Map<String, DataBlockEncoding> familyToDataBlockEncoding) throws IOException {
  TableDescriptorBuilder mockTableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]);
  for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
    ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder
      .newBuilder(Bytes.toBytes(entry.getKey()))
      .setMaxVersions(1)
      .setDataBlockEncoding(entry.getValue())
      .setBlockCacheEnabled(false)
      .setTimeToLive(0)
      .build();
    mockTableDescriptor.setColumnFamily(columnFamilyDescriptor);
  }
  // getDescriptor() returns a TableDescriptor, so the builder must be built first.
  Mockito.doReturn(mockTableDescriptor.build()).when(table).getDescriptor();
}
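A hypothetical invocation of this helper, showing how a test might map families to encodings before calling it; the family names "info" and "meta" are made up for illustration.

// Hypothetical test fragment.
Map<String, DataBlockEncoding> familyToEncoding = new HashMap<>();
familyToEncoding.put("info", DataBlockEncoding.FAST_DIFF);
familyToEncoding.put("meta", DataBlockEncoding.NONE);
Table table = Mockito.mock(Table.class);
setupMockColumnFamiliesForDataBlockEncoding(table, familyToEncoding);
// table.getDescriptor() now returns a descriptor advertising those encodings.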