Use of org.apache.accumulo.core.spi.cache.CacheEntry in project Accumulo by Apache:
the class TinyLfuBlockCache, method resolveDependencies.
/**
 * Resolves each dependency by fetching (or synchronously loading) its block from this cache.
 *
 * @param deps map of block name to the loader for that block
 * @return map of block name to block data, or null if any dependency could not be loaded
 */
private Map<String, byte[]> resolveDependencies(Map<String, Loader> deps) {
// Common case: a single dependency avoids allocating a HashMap.
if (deps.size() == 1) {
Entry<String, Loader> only = deps.entrySet().iterator().next();
CacheEntry cached = getBlock(only.getKey(), only.getValue());
return cached == null ? null : Collections.singletonMap(only.getKey(), cached.getBuffer());
}
Map<String, byte[]> resolved = new HashMap<>();
for (Entry<String, Loader> dep : deps.entrySet()) {
CacheEntry cached = getBlock(dep.getKey(), dep.getValue());
if (cached == null) {
// A single missing dependency fails the entire resolution.
return null;
}
resolved.put(dep.getKey(), cached.getBuffer());
}
return resolved;
}
Use of org.apache.accumulo.core.spi.cache.CacheEntry in project Accumulo by Apache:
the class SynchronousLoadingBlockCache, method getBlock.
/**
 * Gets a block from the cache, synchronously loading it with the supplied loader when absent.
 * Loading is serialized per lock stripe so that at most one thread loads a given block.
 *
 * @param blockName name of the block to fetch
 * @param loader used to produce the block data (and declare its dependencies) on a miss
 * @return the cache entry, or null if the block or one of its dependencies could not be loaded
 */
@Override
public CacheEntry getBlock(String blockName, Loader loader) {
CacheEntry ce = getBlock(blockName);
if (ce != null) {
return ce;
}
// Resolve dependencies intentionally BEFORE taking the load lock, so recursive loads of
// dependent blocks do not run (and possibly block) while holding a lock stripe.
Map<String, byte[]> depData = resolveDependencies(loader.getDependencies());
if (depData == null) {
return null;
}
// Stripe the load locks by block name; mask the sign bit so the index is non-negative.
int lockIndex = (blockName.hashCode() & 0x7fffffff) % loadLocks.length;
Lock loadLock = loadLocks[lockIndex];
// Acquire the lock OUTSIDE the try block: if lock() failed inside the try, the finally
// clause would call unlock() on a lock this thread does not hold, masking the real error.
loadLock.lock();
try {
// Check again after getting the lock; another thread may have loaded while we waited.
ce = getBlockNoStats(blockName);
if (ce != null) {
return ce;
}
// Not in cache, so load the data.
byte[] data = loader.load(getMaxEntrySize(), depData);
if (data == null) {
return null;
}
// Attempt to add the loaded data to the cache.
return cacheBlock(blockName, data);
} finally {
loadLock.unlock();
}
}
Use of org.apache.accumulo.core.spi.cache.CacheEntry in project Accumulo by Apache:
the class TestLruBlockCache, method testCacheSimple.
/**
 * Caches a set of random blocks in an LRU index cache and verifies retrieval,
 * heap-size accounting, and that no evictions occur.
 */
@Test
public void testCacheSimple() throws Exception {
long maxSize = 1000000;
long blockSize = calculateBlockSizeDefault(maxSize, 101);
// Configure a cache manager that uses the LRU implementation.
DefaultConfiguration defaults = DefaultConfiguration.getInstance();
ConfigurationCopy conf = new ConfigurationCopy(defaults);
conf.set(Property.TSERV_CACHE_MANAGER_IMPL, LruBlockCacheManager.class.getName());
BlockCacheManager manager = BlockCacheManagerFactory.getInstance(conf);
conf.set(Property.TSERV_DEFAULT_BLOCKSIZE, Long.toString(blockSize));
conf.set(Property.TSERV_INDEXCACHE_SIZE, Long.toString(maxSize));
manager.start(new BlockCacheConfiguration(conf));
LruBlockCache cache = (LruBlockCache) manager.getBlockCache(CacheType.INDEX);
Block[] blocks = generateRandomBlocks(100, blockSize);
long expectedCacheSize = cache.heapSize();
// Nothing should be cached yet.
for (Block b : blocks) {
assertNull(cache.getBlock(b.blockName));
}
// Cache every block, tracking the expected heap growth.
for (Block b : blocks) {
cache.cacheBlock(b.blockName, b.buf);
expectedCacheSize += b.heapSize();
}
// Two passes: verify the computed heap size, then confirm every block is retrievable.
// The second pass checks that re-accessing cached blocks does not change the heap size.
for (int pass = 0; pass < 2; pass++) {
assertEquals(expectedCacheSize, cache.heapSize());
for (Block b : blocks) {
CacheEntry entry = cache.getBlock(b.blockName);
assertNotNull(entry);
assertEquals(entry.getBuffer().length, b.buf.length);
}
}
// Nothing should have been evicted.
assertEquals(0, cache.getEvictionCount());
manager.stop();
}
Use of org.apache.accumulo.core.spi.cache.CacheEntry in project Accumulo by Apache:
the class BlockIndexTest, method test1.
/**
 * Serializes 1000 ascending keys with relative-key encoding, builds a block index over the
 * cached block, and verifies that seekBlock positions the stream correctly around every
 * indexed entry.
 */
@Test
public void test1() throws IOException {
// Serialize 1000 ascending key/value pairs using relative-key encoding.
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream out = new DataOutputStream(baos);
Key prevKey = null;
int num = 1000;
for (int i = 0; i < num; i++) {
Key key = new Key(RFileTest.formatString("", i), "cf1", "cq1");
new RelativeKey(prevKey, key).write(out);
new Value().write(out);
prevKey = key;
}
out.close();
final byte[] data = baos.toByteArray();
// Wrap the serialized bytes in a cache entry and a readable cached block.
CacheEntry ce = new MyCacheEntry(data);
CachableBlockFile.CachedBlockRead cacheBlock = new CachableBlockFile.CachedBlockRead(ce, data);
BlockIndex blockIndex = null;
// getIndex is invoked repeatedly; presumably the index is only materialized after the
// block has been requested enough times -- TODO confirm against BlockIndex.getIndex.
for (int i = 0; i < 129; i++) blockIndex = BlockIndex.getIndex(cacheBlock, new IndexEntry(prevKey, num, 0, 0, 0));
BlockIndexEntry[] indexEntries = blockIndex.getIndexEntries();
for (int i = 0; i < indexEntries.length; i++) {
// Each index entry's prev key encodes the row number it points at.
int row = Integer.parseInt(indexEntries[i].getPrevKey().getRowData().toString());
BlockIndexEntry bie;
// Seeking to exactly this row lands on the previous index entry (none for the first).
bie = blockIndex.seekBlock(new Key(RFileTest.formatString("", row), "cf1", "cq1"), cacheBlock);
if (i == 0)
assertSame(null, bie);
else
assertSame(indexEntries[i - 1], bie);
// Seeking just before the row lands on the same entry as the exact seek.
assertSame(bie, blockIndex.seekBlock(new Key(RFileTest.formatString("", row - 1), "cf1", "cq1"), cacheBlock));
// Seeking just past the row lands on this entry.
bie = blockIndex.seekBlock(new Key(RFileTest.formatString("", row + 1), "cf1", "cq1"), cacheBlock);
assertSame(indexEntries[i], bie);
// After the seek, the stream should be positioned so the next decoded key is row + 1.
RelativeKey rk = new RelativeKey();
rk.setPrevKey(bie.getPrevKey());
rk.readFields(cacheBlock);
assertEquals(rk.getKey(), new Key(RFileTest.formatString("", row + 1), "cf1", "cq1"));
}
cacheBlock.close();
}
Use of org.apache.accumulo.core.spi.cache.CacheEntry in project Accumulo by Apache:
the class SynchronousLoadingBlockCache, method resolveDependencies.
/**
 * Resolves each dependency by fetching (or loading) its block and collecting the raw data.
 *
 * @param loaderDeps map of block name to the loader for that block
 * @return map of block name to block data; an empty map when there are no dependencies,
 *         or null if any dependency failed to load
 */
private Map<String, byte[]> resolveDependencies(Map<String, Loader> loaderDeps) {
int numDeps = loaderDeps.size();
if (numDeps == 0) {
return Collections.emptyMap();
}
if (numDeps == 1) {
// Single dependency: avoid allocating a full HashMap.
Entry<String, Loader> only = loaderDeps.entrySet().iterator().next();
CacheEntry cached = getBlock(only.getKey(), only.getValue());
return cached == null ? null : Collections.singletonMap(only.getKey(), cached.getBuffer());
}
Map<String, byte[]> resolved = new HashMap<>();
for (Entry<String, Loader> dep : loaderDeps.entrySet()) {
CacheEntry cached = getBlock(dep.getKey(), dep.getValue());
if (cached == null) {
// Any failed dependency fails the entire resolution.
return null;
}
resolved.put(dep.getKey(), cached.getBuffer());
}
return resolved;
}
Aggregations