use of org.apache.geode.cache.CacheLoader in project geode by apache.
the class LRUEvictionControllerDUnitTest method testSizeOne.
/**
 * Tests an <code>LRUCapacityController</code> of size 1.
 */
@Test
public void testSizeOne() throws CacheException {
  int threshold = 1;
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setOffHeap(isOffHeapEnabled());
  factory.setScope(Scope.LOCAL);
  factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(threshold));
  factory.setCacheLoader(new CacheLoader() {
    public Object load(LoaderHelper helper) throws CacheLoaderException {
      return "LOADED VALUE";
    }

    public void close() {}
  });
  Region region;
  if (usingMain) {
    DistributedSystem system = DistributedSystem.connect(new Properties());
    Cache cache = CacheFactory.create(system);
    region = cache.createRegion("Test", factory.create());
  } else {
    region = createRegion(name, factory.create());
  }
  LRUStatistics lruStats = getLRUStats(region);
  assertNotNull(lruStats);
  for (int i = 1; i <= 1; i++) {
    Object key = new Integer(i);
    Object value = String.valueOf(i);
    region.put(key, value);
    assertEquals(1, lruStats.getCounter());
    assertEquals(0, lruStats.getEvictions());
  }
  for (int i = 2; i <= 10; i++) {
    Object key = new Integer(i);
    Object value = String.valueOf(i);
    region.put(key, value);
    assertEquals(1, lruStats.getCounter());
    assertEquals(i - 1, lruStats.getEvictions());
  }
  for (int i = 11; i <= 20; i++) {
    Object key = new Integer(i);
    // Object value = String.valueOf(i);
    // Invoke loader
    region.get(key);
    assertEquals(1, lruStats.getCounter());
    assertEquals(i - 1, lruStats.getEvictions());
  }
}
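The same eviction-plus-loader interaction can be reproduced outside the DUnit harness with the public Geode API. The following is a minimal sketch, not taken from the project's tests; the region name, key range, and loader return value are illustrative. With an LRU entry limit of 1, every get() on a missing key invokes the loader, and the region never holds more than one entry (the previous entry is removed by the default local-destroy eviction action).

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheLoader;
import org.apache.geode.cache.CacheLoaderException;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.LoaderHelper;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class LruLoaderSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    Region<Integer, String> region = cache
        .<Integer, String>createRegionFactory(RegionShortcut.LOCAL)
        .setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(1))
        .setCacheLoader(new CacheLoader<Integer, String>() {
          public String load(LoaderHelper<Integer, String> helper) throws CacheLoaderException {
            return "LOADED VALUE"; // stand-in for a real backend lookup
          }

          public void close() {}
        })
        .create("Test");

    for (int i = 1; i <= 5; i++) {
      region.get(i); // miss -> loader runs; LRU keeps the entry count at 1
    }
    System.out.println("entries in region: " + region.size()); // expected: 1
    cache.close();
  }
}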
use of org.apache.geode.cache.CacheLoader in project geode by apache.
the class RebalanceOperationDUnitTest method runTestWaitForOperation.
/**
 * Test to ensure that we wait for in progress write operations before moving a primary.
 *
 * @throws Exception
 */
public void runTestWaitForOperation(final Operation op) throws Exception {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {
    public void run() {
      Cache cache = getCache();
      AttributesFactory attr = new AttributesFactory();
      attr.setCacheLoader(new CacheLoader() {
        public Object load(LoaderHelper helper) throws CacheLoaderException {
          return "anobject";
        }

        public void close() {}
      });
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(1);
      paf.setRecoveryDelay(-1);
      paf.setStartupRecoveryDelay(-1);
      paf.setLocalMaxMemory(100);
      PartitionAttributes prAttr = paf.create();
      attr.setPartitionAttributes(prAttr);
      cache.createRegion("region1", attr.create());
    }
  };
  // Create a region in this VM with a cache writer
  // and cache loader
  Cache cache = getCache();
  AttributesFactory attr = new AttributesFactory();
  attr.setCacheLoader(new CacheLoader() {
    public Object load(LoaderHelper helper) throws CacheLoaderException {
      return "anobject";
    }

    public void close() {}
  });
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setRedundantCopies(1);
  paf.setRecoveryDelay(-1);
  paf.setStartupRecoveryDelay(-1);
  paf.setLocalMaxMemory(100);
  PartitionAttributes prAttr = paf.create();
  attr.setPartitionAttributes(prAttr);
  final Region region = cache.createRegion("region1", attr.create());
  // create some buckets
  region.put(Integer.valueOf(1), "A");
  region.put(Integer.valueOf(2), "A");
  BlockingCacheListener cacheWriter = new BlockingCacheListener(2);
  region.getAttributesMutator().addCacheListener(cacheWriter);
  // start two threads doing operations, one on each bucket
  // the threads will block on the cache writer. The rebalance operation
  // will try to move one of these buckets, but it shouldn't
  // be able to because of the in progress operation.
  Thread thread1 = new Thread() {
    public void run() {
      op.execute(region, Integer.valueOf(1));
    }
  };
  thread1.start();
  Thread thread2 = new Thread() {
    public void run() {
      op.execute(region, Integer.valueOf(2));
    }
  };
  thread2.start();
  cacheWriter.waitForOperationsStarted();
  SerializableRunnable checkLowRedundancy = new SerializableRunnable("checkLowRedundancy") {
    public void run() {
      Cache cache = getCache();
      Region region = cache.getRegion("region1");
      PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
      assertEquals(2, details.getCreatedBucketCount());
      assertEquals(0, details.getActualRedundantCopies());
      assertEquals(2, details.getLowRedundancyBucketCount());
    }
  };
  // make sure we can tell that the buckets have low redundancy
  checkLowRedundancy.run();
  // Create the region in the other VM (should have no effect)
  vm1.invoke(createPrRegion);
  // Make sure we still have low redundancy
  checkLowRedundancy.run();
  ResourceManager manager = cache.getResourceManager();
  RebalanceOperation rebalance = manager.createRebalanceFactory().start();
  try {
    rebalance.getResults(5, TimeUnit.SECONDS);
    fail("Operation should not have completed");
  } catch (TimeoutException expected) {
    // do nothing
  }
  cacheWriter.release();
  LogWriterUtils.getLogWriter()
      .info("starting wait for rebalance. Will wait for " + MAX_WAIT + " seconds");
  RebalanceResults results = rebalance.getResults(MAX_WAIT, TimeUnit.SECONDS);
  assertEquals(2, results.getTotalBucketCreatesCompleted());
  assertEquals(1, results.getTotalPrimaryTransfersCompleted());
  assertEquals(0, results.getTotalBucketTransferBytes());
  assertEquals(0, results.getTotalBucketTransfersCompleted());
  Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
  assertEquals(1, detailSet.size());
  PartitionRebalanceInfo details = detailSet.iterator().next();
  assertEquals(2, details.getBucketCreatesCompleted());
  assertEquals(1, details.getPrimaryTransfersCompleted());
  assertEquals(0, details.getBucketTransferBytes());
  assertEquals(0, details.getBucketTransfersCompleted());
  Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
  assertEquals(2, afterDetails.size());
  for (PartitionMemberInfo memberDetails : afterDetails) {
    assertEquals(2, memberDetails.getBucketCount());
    assertEquals(1, memberDetails.getPrimaryCount());
  }
  SerializableRunnable checkRedundancyFixed = new SerializableRunnable("checkRedundancyFixed") {
    public void run() {
      Cache cache = getCache();
      Region region = cache.getRegion("region1");
      PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
      assertEquals(2, details.getCreatedBucketCount());
      assertEquals(1, details.getActualRedundantCopies());
      assertEquals(0, details.getLowRedundancyBucketCount());
    }
  };
  checkRedundancyFixed.run();
  vm1.invoke(checkRedundancyFixed);
}
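As a side note on the rebalance API used above, a rebalance can also be simulated before it is started, which reports what would move without transferring any data. The snippet below is a hedged sketch of that pattern, not code from this test; it assumes partitioned regions already exist in the cache and uses only public ResourceManager, RebalanceFactory, and RebalanceResults calls.

import java.util.concurrent.TimeUnit;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.control.RebalanceOperation;
import org.apache.geode.cache.control.RebalanceResults;
import org.apache.geode.cache.control.ResourceManager;

public class RebalanceSketch {
  public static void main(String[] args) throws Exception {
    Cache cache = new CacheFactory().create(); // partitioned regions assumed to exist already
    ResourceManager manager = cache.getResourceManager();

    // Dry run: report what a rebalance would do without moving any buckets.
    RebalanceOperation simulation = manager.createRebalanceFactory().simulate();
    RebalanceResults preview = simulation.getResults();
    System.out.println("would transfer " + preview.getTotalBucketTransferBytes() + " bytes");

    // Real rebalance, bounded by a timeout as in the test above.
    RebalanceOperation rebalance = manager.createRebalanceFactory().start();
    RebalanceResults results = rebalance.getResults(60, TimeUnit.SECONDS);
    System.out.println("primary transfers: " + results.getTotalPrimaryTransfersCompleted());
    cache.close();
  }
}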
use of org.apache.geode.cache.CacheLoader in project geode by apache.
the class SearchLoadAndWriteProcessor method load.
private void load(EntryEventImpl event) throws CacheLoaderException, TimeoutException {
  Object obj = null;
  RegionAttributes attrs = this.region.getAttributes();
  Scope scope = attrs.getScope();
  CacheLoader loader = ((AbstractRegion) region).basicGetLoader();
  Assert.assertTrue(scope.isDistributed());
  if ((loader != null) && (!scope.isGlobal())) {
    obj = doLocalLoad(loader, false);
    event.setNewValue(obj);
    Assert.assertTrue(obj != Token.INVALID && obj != Token.LOCAL_INVALID);
    return;
  }
  if (scope.isGlobal()) {
    Assert.assertTrue(this.lock == null);
    Set loadCandidatesSet = advisor.adviseNetLoad();
    if ((loader == null) && (loadCandidatesSet.isEmpty())) {
      // no one has a data Loader. No point getting a lock
      return;
    }
    this.lock = region.getDistributedLock(this.key);
    boolean locked = false;
    try {
      final CancelCriterion stopper = region.getCancelCriterion();
      for (;;) {
        stopper.checkCancelInProgress(null);
        boolean interrupted = Thread.interrupted();
        try {
          locked = this.lock.tryLock(region.getCache().getLockTimeout(), TimeUnit.SECONDS);
          if (!locked) {
            throw new TimeoutException(
                LocalizedStrings.SearchLoadAndWriteProcessor_TIMED_OUT_LOCKING_0_BEFORE_LOAD
                    .toLocalizedString(key));
          }
          break;
        } catch (InterruptedException ignore) {
          interrupted = true;
          region.getCancelCriterion().checkCancelInProgress(null);
          // continue;
        } finally {
          if (interrupted) {
            Thread.currentThread().interrupt();
          }
        }
      } // for
      if (loader == null) {
        this.localLoad = false;
        if (scope.isDistributed()) {
          this.isSerialized = false;
          obj = doNetLoad();
          Assert.assertTrue(obj != Token.INVALID && obj != Token.LOCAL_INVALID);
          if (this.isSerialized && obj != null) {
            event.setSerializedNewValue((byte[]) obj);
          } else {
            event.setNewValue(obj);
          }
        }
      } else {
        obj = doLocalLoad(loader, false);
        Assert.assertTrue(obj != Token.INVALID && obj != Token.LOCAL_INVALID);
        event.setNewValue(obj);
      }
      return;
    } finally {
      // called on this processor
      if (!locked)
        this.lock = null;
    }
  }
  if (scope.isDistributed()) {
    // long start = System.currentTimeMillis();
    obj = doNetLoad();
    if (this.isSerialized && obj != null) {
      event.setSerializedNewValue((byte[]) obj);
    } else {
      Assert.assertTrue(obj != Token.INVALID && obj != Token.LOCAL_INVALID);
      event.setNewValue(obj);
    }
    // long end = System.currentTimeMillis();
  }
}
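From the application's point of view, the GLOBAL-scope branch above means a loader-backed get() first acquires the entry's distributed lock, bounded by the cache-wide lock timeout, and fails with a TimeoutException if the lock cannot be obtained. The sketch below is a minimal, hypothetical configuration of that scenario; the region name, key, and loader result are illustrative, and the locking itself happens inside Geode as shown in the method above.

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheLoader;
import org.apache.geode.cache.CacheLoaderException;
import org.apache.geode.cache.LoaderHelper;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.Scope;

public class GlobalScopeLoadSketch {
  public static void main(String[] args) throws Exception {
    Cache cache = new CacheFactory().create();
    cache.setLockTimeout(10); // seconds to wait for the distributed lock before a load

    AttributesFactory<String, String> factory = new AttributesFactory<>();
    factory.setScope(Scope.GLOBAL); // loads are serialized by a distributed entry lock
    factory.setCacheLoader(new CacheLoader<String, String>() {
      public String load(LoaderHelper<String, String> helper) throws CacheLoaderException {
        return "value-for-" + helper.getKey();
      }

      public void close() {}
    });
    Region<String, String> region = cache.createRegion("globalRegion", factory.create());

    // Missing key: Geode takes the distributed lock, then invokes the loader.
    System.out.println(region.get("k1"));
    cache.close();
  }
}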
use of org.apache.geode.cache.CacheLoader in project geode by apache.
the class CacheXmlParser method endCacheLoader.
/**
 * When a <code>cache-loader</code> element is finished, the {@link Parameter}s and class names
 * are popped off the stack. The cache loader is instantiated and initialized with the parameters,
 * if appropriate. When the loader is being created in a dynamic-region-factory, there may be a
 * disk-dir element on the stack, represented by a File object. Otherwise, dynamic-region-factory
 * uses a RegionAttributesCreation, just like a region, and is treated the same.
 * <p>
 * The loader may also be created in the context of partition-attributes.
 */
private void endCacheLoader() {
  Declarable d = createDeclarable();
  if (!(d instanceof CacheLoader)) {
    throw new CacheXmlException(
        LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_CACHELOADER
            .toLocalizedString(d.getClass().getName()));
  }
  // Two peeks required to handle dynamic region context
  Object a = stack.peek();
  // check for disk-dir
  if ((a instanceof File)) {
    Object sav = stack.pop();
    a = stack.peek();
    if (!(a instanceof RegionAttributesCreation)) {
      throw new CacheXmlException(
          LocalizedStrings.CacheXmlParser_A_CACHELOADER_MUST_BE_DEFINED_IN_THE_CONTEXT_OF_REGIONATTRIBUTES
              .toLocalizedString());
    }
    stack.push(sav);
    RegionAttributesCreation attrs = (RegionAttributesCreation) a;
    attrs.setCacheLoader((CacheLoader) d);
  } else if (a instanceof RegionAttributesCreation) {
    // check for normal region-attributes
    RegionAttributesCreation attrs = (RegionAttributesCreation) a;
    attrs.setCacheLoader((CacheLoader) d);
  } else {
    throw new CacheXmlException(
        LocalizedStrings.CacheXmlParser_A_0_MUST_BE_DEFINED_IN_THE_CONTEXT_OF_REGIONATTRIBUTES_OR_1
            .toLocalizedString(new Object[] { CACHE_LOADER, DYNAMIC_REGION_FACTORY }));
  }
}
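For a cache.xml-declared loader to pass through endCacheLoader, the named class must be instantiable by createDeclarable and must implement CacheLoader; if <parameter> elements are present, it also needs an init(Properties) method to receive them. The class below is a hypothetical illustration of such a loader, not part of Geode; the parameter name "default-value" is invented for the example.

import java.util.Properties;
import org.apache.geode.cache.CacheLoader;
import org.apache.geode.cache.CacheLoaderException;
import org.apache.geode.cache.Declarable;
import org.apache.geode.cache.LoaderHelper;

// Referenced from cache.xml by its fully qualified class name inside <cache-loader>.
public class ConfigurableLoader implements CacheLoader<String, String>, Declarable {

  private String defaultValue = "UNKNOWN";

  // Receives the <parameter> values collected by the parser, if any were declared.
  public void init(Properties props) {
    defaultValue = props.getProperty("default-value", defaultValue);
  }

  public String load(LoaderHelper<String, String> helper) throws CacheLoaderException {
    return defaultValue; // stand-in for a real lookup keyed by helper.getKey()
  }

  public void close() {}
}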
use of org.apache.geode.cache.CacheLoader in project geode by apache.
the class RegionAlterFunction method alterRegion.
private <K, V> Region<?, ?> alterRegion(Cache cache, RegionFunctionArgs regionAlterArgs) {
  final String regionPathString = regionAlterArgs.getRegionPath();
  RegionPath regionPath = new RegionPath(regionPathString);
  AbstractRegion region = (AbstractRegion) cache.getRegion(regionPathString);
  if (region == null) {
    throw new IllegalArgumentException(CliStrings.format(
        CliStrings.ALTER_REGION__MSG__REGION_DOESNT_EXIST_0, new Object[] { regionPath }));
  }
  AttributesMutator mutator = region.getAttributesMutator();
  if (regionAlterArgs.isCloningEnabled() != null) {
    mutator.setCloningEnabled(regionAlterArgs.isCloningEnabled());
    if (logger.isDebugEnabled()) {
      logger.debug("Region successfully altered - cloning");
    }
  }
  if (regionAlterArgs.getEvictionMax() != null) {
    mutator.getEvictionAttributesMutator().setMaximum(regionAlterArgs.getEvictionMax());
    if (logger.isDebugEnabled()) {
      logger.debug("Region successfully altered - eviction attributes max");
    }
  }
  // Alter expiration attributes
  final RegionFunctionArgs.ExpirationAttrs newEntryExpirationIdleTime =
      regionAlterArgs.getEntryExpirationIdleTime();
  if (newEntryExpirationIdleTime != null) {
    mutator.setEntryIdleTimeout(
        parseExpirationAttributes(newEntryExpirationIdleTime, region.getEntryIdleTimeout()));
    if (logger.isDebugEnabled()) {
      logger.debug("Region successfully altered - entry idle timeout");
    }
  }
  final RegionFunctionArgs.ExpirationAttrs newEntryExpirationTTL =
      regionAlterArgs.getEntryExpirationTTL();
  if (newEntryExpirationTTL != null) {
    mutator.setEntryTimeToLive(
        parseExpirationAttributes(newEntryExpirationTTL, region.getEntryTimeToLive()));
    if (logger.isDebugEnabled()) {
      logger.debug("Region successfully altered - entry TTL");
    }
  }
  final RegionFunctionArgs.ExpirationAttrs newRegionExpirationIdleTime =
      regionAlterArgs.getRegionExpirationIdleTime();
  if (newRegionExpirationIdleTime != null) {
    mutator.setRegionIdleTimeout(
        parseExpirationAttributes(newRegionExpirationIdleTime, region.getRegionIdleTimeout()));
    if (logger.isDebugEnabled()) {
      logger.debug("Region successfully altered - region idle timeout");
    }
  }
  final RegionFunctionArgs.ExpirationAttrs newRegionExpirationTTL =
      regionAlterArgs.getRegionExpirationTTL();
  if (newRegionExpirationTTL != null) {
    mutator.setRegionTimeToLive(
        parseExpirationAttributes(newRegionExpirationTTL, region.getRegionTimeToLive()));
    if (logger.isDebugEnabled()) {
      logger.debug("Region successfully altered - region TTL");
    }
  }
  // Alter Gateway Sender Ids
  final Set<String> newGatewaySenderIds = regionAlterArgs.getGatewaySenderIds();
  if (newGatewaySenderIds != null) {
    // Remove old gateway sender ids that aren't in the new list
    Set<String> oldGatewaySenderIds = region.getGatewaySenderIds();
    if (!oldGatewaySenderIds.isEmpty()) {
      for (String gatewaySenderId : oldGatewaySenderIds) {
        if (!newGatewaySenderIds.contains(gatewaySenderId)) {
          mutator.removeGatewaySenderId(gatewaySenderId);
        }
      }
    }
    // Add new gateway sender ids that don't already exist
    for (String gatewaySenderId : newGatewaySenderIds) {
      if (!oldGatewaySenderIds.contains(gatewaySenderId)) {
        mutator.addGatewaySenderId(gatewaySenderId);
      }
    }
    if (logger.isDebugEnabled()) {
      logger.debug("Region successfully altered - gateway sender IDs");
    }
  }
  // Alter Async Queue Ids
  final Set<String> newAsyncEventQueueIds = regionAlterArgs.getAsyncEventQueueIds();
  if (newAsyncEventQueueIds != null) {
    // Remove old async event queue ids that aren't in the new list
    Set<String> oldAsyncEventQueueIds = region.getAsyncEventQueueIds();
    if (!oldAsyncEventQueueIds.isEmpty()) {
      for (String asyncEventQueueId : oldAsyncEventQueueIds) {
        if (!newAsyncEventQueueIds.contains(asyncEventQueueId)) {
          mutator.removeAsyncEventQueueId(asyncEventQueueId);
        }
      }
    }
    // Add new async event queue ids that don't already exist
    for (String asyncEventQueueId : newAsyncEventQueueIds) {
      if (!oldAsyncEventQueueIds.contains(asyncEventQueueId)) {
        mutator.addAsyncEventQueueId(asyncEventQueueId);
      }
    }
    if (logger.isDebugEnabled()) {
      logger.debug("Region successfully altered - async event queue IDs");
    }
  }
  // Alter Cache Listeners
  final Set<String> newCacheListenerNames = regionAlterArgs.getCacheListeners();
  if (newCacheListenerNames != null) {
    // Remove old cache listeners that aren't in the new list
    CacheListener[] oldCacheListeners = region.getCacheListeners();
    for (CacheListener oldCacheListener : oldCacheListeners) {
      if (!newCacheListenerNames.contains(oldCacheListener.getClass().getName())) {
        mutator.removeCacheListener(oldCacheListener);
      }
    }
    // Add new cache listeners that don't already exist
    for (String newCacheListenerName : newCacheListenerNames) {
      if (newCacheListenerName.isEmpty()) {
        continue;
      }
      boolean nameFound = false;
      for (CacheListener oldCacheListener : oldCacheListeners) {
        if (oldCacheListener.getClass().getName().equals(newCacheListenerName)) {
          nameFound = true;
          break;
        }
      }
      if (!nameFound) {
        Class<CacheListener<K, V>> cacheListenerKlass =
            forName(newCacheListenerName, CliStrings.ALTER_REGION__CACHELISTENER);
        mutator.addCacheListener(
            newInstance(cacheListenerKlass, CliStrings.ALTER_REGION__CACHELISTENER));
      }
    }
    if (logger.isDebugEnabled()) {
      logger.debug("Region successfully altered - cache listeners");
    }
  }
  final String cacheLoader = regionAlterArgs.getCacheLoader();
  if (cacheLoader != null) {
    if (cacheLoader.isEmpty()) {
      mutator.setCacheLoader(null);
    } else {
      Class<CacheLoader<K, V>> cacheLoaderKlass =
          forName(cacheLoader, CliStrings.ALTER_REGION__CACHELOADER);
      mutator.setCacheLoader(newInstance(cacheLoaderKlass, CliStrings.ALTER_REGION__CACHELOADER));
    }
    if (logger.isDebugEnabled()) {
      logger.debug("Region successfully altered - cache loader");
    }
  }
  final String cacheWriter = regionAlterArgs.getCacheWriter();
  if (cacheWriter != null) {
    if (cacheWriter.isEmpty()) {
      mutator.setCacheWriter(null);
    } else {
      Class<CacheWriter<K, V>> cacheWriterKlass =
          forName(cacheWriter, CliStrings.ALTER_REGION__CACHEWRITER);
      mutator.setCacheWriter(newInstance(cacheWriterKlass, CliStrings.ALTER_REGION__CACHEWRITER));
    }
    if (logger.isDebugEnabled()) {
      logger.debug("Region successfully altered - cache writer");
    }
  }
  return region;
}
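The cache-loader branch above boils down to a runtime swap through the region's AttributesMutator. Stripped of the gfsh plumbing, the core call looks like the hypothetical helper below; passing null clears the existing loader, matching the empty-string case in the function.

import org.apache.geode.cache.AttributesMutator;
import org.apache.geode.cache.CacheLoader;
import org.apache.geode.cache.Region;

public final class LoaderSwap {
  // Replace (or, with null, remove) a region's cache loader without recreating the region.
  static <K, V> void replaceLoader(Region<K, V> region, CacheLoader<K, V> newLoader) {
    AttributesMutator<K, V> mutator = region.getAttributesMutator();
    mutator.setCacheLoader(newLoader);
  }
}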