Use of org.apache.geode.cache.EvictionAttributes in project geode by apache.
From the class CacheXml66DUnitTest, method testPartitionedRegionAttributesForMemLruWithMaxMem:
@Test
public void testPartitionedRegionAttributesForMemLruWithMaxMem() throws Exception {
  final int redundantCopies = 1;
  final int maxMem = 25;
  CacheCreation cache = new CacheCreation();
  RegionAttributesCreation attrs = new RegionAttributesCreation(cache);
  attrs.setStatisticsEnabled(true);
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setRedundantCopies(redundantCopies);
  paf.setTotalMaxMemory(500);
  paf.setLocalMaxMemory(100);
  AttributesFactory fac = new AttributesFactory(attrs);
  fac.setEvictionAttributes(
      EvictionAttributes.createLRUMemoryAttributes(maxMem, null, EvictionAction.LOCAL_DESTROY));
  fac.setPartitionAttributes(paf.create());
  cache.createRegion("parRoot", fac.create());
  testXml(cache);
  Cache c = getCache();
  assertNotNull(c);
  Region region = c.getRegion("parRoot");
  assertNotNull(region);
  RegionAttributes regionAttrs = region.getAttributes();
  PartitionAttributes pa = regionAttrs.getPartitionAttributes();
  EvictionAttributes ea = regionAttrs.getEvictionAttributes();
  assertEquals(pa.getRedundantCopies(), 1);
  assertEquals(pa.getLocalMaxMemory(), 100);
  assertEquals(pa.getTotalMaxMemory(), 500);
  assertEquals(ea.getAlgorithm(), EvictionAlgorithm.LRU_MEMORY);
  assertEquals(ea.getAction(), EvictionAction.LOCAL_DESTROY);
  assertNotSame(ea.getMaximum(), maxMem);
  assertEquals(ea.getMaximum(), pa.getLocalMaxMemory());
}
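For context, a minimal sketch (not from the Geode sources; the region name "parRoot" and the sizes simply mirror the test above) of the same LRU-memory eviction configured through the public CacheFactory/RegionFactory API instead of CacheCreation. As the assertions above show, on a partitioned region the effective eviction maximum ends up being the partition's local max memory rather than the value passed to createLRUMemoryAttributes.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class LruMemoryEvictionSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();

    PartitionAttributesFactory<String, String> paf = new PartitionAttributesFactory<>();
    paf.setRedundantCopies(1);
    paf.setLocalMaxMemory(100); // MB
    paf.setTotalMaxMemory(500); // MB

    Region<String, String> region = cache.<String, String>createRegionFactory(RegionShortcut.PARTITION)
        .setPartitionAttributes(paf.create())
        .setEvictionAttributes(
            EvictionAttributes.createLRUMemoryAttributes(25, null, EvictionAction.LOCAL_DESTROY))
        .create("parRoot");

    // For a partitioned region the reported maximum follows local-max-memory (100),
    // not the 25 MB passed above, which is what the test asserts.
    System.out.println(region.getAttributes().getEvictionAttributes().getMaximum());

    cache.close();
  }
}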
Use of org.apache.geode.cache.EvictionAttributes in project geode by apache.
From the class CacheXml66DUnitTest, method testHeapLRUEviction:
@Test
public void testHeapLRUEviction() throws Exception {
  final String name = getUniqueName();
  beginCacheXml();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  EvictionAttributes ev =
      EvictionAttributes.createLRUHeapAttributes(null, EvictionAction.OVERFLOW_TO_DISK);
  factory.setEvictionAttributes(ev);
  // RegionAttributes atts = factory.create();
  createRegion(name, factory.create());
  finishCacheXml(this.temporaryFolder.getRoot(), getUniqueName(), getUseSchema(), getGemFireVersion());
  Region r = getRootRegion().getSubregion(name);
  EvictionAttributes hlea = r.getAttributes().getEvictionAttributes();
  assertEquals(EvictionAction.OVERFLOW_TO_DISK, hlea.getAction());
}
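A minimal standalone sketch (illustrative names, not from the test) of heap-LRU eviction with overflow to disk on an ordinary local region. The resource manager's eviction heap percentage, which the XML round-trip test does not need to set, is what actually drives heap-LRU eviction at runtime.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class HeapLruEvictionSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();

    // Heap-LRU eviction is triggered by the resource manager's eviction heap percentage.
    cache.getResourceManager().setEvictionHeapPercentage(75.0f);

    Region<String, byte[]> region = cache.<String, byte[]>createRegionFactory(RegionShortcut.LOCAL)
        .setEvictionAttributes(
            EvictionAttributes.createLRUHeapAttributes(null, EvictionAction.OVERFLOW_TO_DISK))
        .create("heapLruRegion");

    // Under heap pressure, entry values overflow to the default disk store.
    region.put("k1", new byte[1024]);

    cache.close();
  }
}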
Use of org.apache.geode.cache.EvictionAttributes in project geode by apache.
From the class SerialGatewaySenderQueue, method initializeRegion:
/**
 * Initializes the <code>Region</code> backing this queue. The <code>Region</code>'s scope is
 * DISTRIBUTED_NO_ACK, its mirror type is KEYS_VALUES, and it is configured to overflow to disk
 * based on the <code>GatewayQueueAttributes</code>.
 *
 * @param sender The GatewaySender <code>SerialGatewaySenderImpl</code>
 * @param listener The GemFire <code>CacheListener</code>; may be null.
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
private void initializeRegion(AbstractGatewaySender sender, CacheListener listener) {
  final InternalCache gemCache = sender.getCache();
  this.region = gemCache.getRegion(this.regionName);
  if (this.region == null) {
    AttributesFactory<Long, AsyncEvent> factory = new AttributesFactory<Long, AsyncEvent>();
    factory.setScope(NO_ACK ? Scope.DISTRIBUTED_NO_ACK : Scope.DISTRIBUTED_ACK);
    factory.setDataPolicy(this.enablePersistence ? DataPolicy.PERSISTENT_REPLICATE : DataPolicy.REPLICATE);
    if (logger.isDebugEnabled()) {
      logger.debug("The policy of region is {}",
          (this.enablePersistence ? DataPolicy.PERSISTENT_REPLICATE : DataPolicy.REPLICATE));
    }
    // Add the cache listener if one was supplied, e.g. when the user of this queue is a
    // secondary VM.
    if (listener != null) {
      factory.addCacheListener(listener);
    }
    // allow for no overflow directory
    EvictionAttributes ea = EvictionAttributes.createLIFOMemoryAttributes(this.maximumQueueMemory,
        EvictionAction.OVERFLOW_TO_DISK);
    factory.setEvictionAttributes(ea);
    factory.setConcurrencyChecksEnabled(false);
    factory.setDiskStoreName(this.diskStoreName);
    // In case of persistence, write to disk synchronously; in case of eviction, write asynchronously.
    factory.setDiskSynchronous(this.isDiskSynchronous);
    // Create the region
    if (logger.isDebugEnabled()) {
      logger.debug("{}: Attempting to create queue region: {}", this, this.regionName);
    }
    final RegionAttributes<Long, AsyncEvent> ra = factory.create();
    try {
      SerialGatewaySenderQueueMetaRegion meta =
          new SerialGatewaySenderQueueMetaRegion(this.regionName, ra, null, gemCache, sender);
      try {
        this.region = gemCache.createVMRegion(this.regionName, ra,
            new InternalRegionArguments().setInternalMetaRegion(meta).setDestroyLockFlag(true)
                .setSnapshotInputStream(null).setImageTarget(null)
                .setIsUsedForSerialGatewaySenderQueue(true).setInternalRegion(true)
                .setSerialGatewaySender(sender));
      } catch (IOException veryUnLikely) {
        logger.fatal(LocalizedMessage.create(
            LocalizedStrings.SingleWriteSingleReadRegionQueue_UNEXPECTED_EXCEPTION_DURING_INIT_OF_0,
            this.getClass()), veryUnLikely);
      } catch (ClassNotFoundException alsoUnlikely) {
        logger.fatal(LocalizedMessage.create(
            LocalizedStrings.SingleWriteSingleReadRegionQueue_UNEXPECTED_EXCEPTION_DURING_INIT_OF_0,
            this.getClass()), alsoUnlikely);
      }
      if (logger.isDebugEnabled()) {
        logger.debug("{}: Created queue region: {}", this, this.region);
      }
    } catch (CacheException e) {
      logger.fatal(LocalizedMessage.create(
          LocalizedStrings.SingleWriteSingleReadRegionQueue_0_THE_QUEUE_REGION_NAMED_1_COULD_NOT_BE_CREATED,
          new Object[] { this, this.regionName }), e);
    }
  } else {
    throw new IllegalStateException("Queue region " + this.region.getFullPath() + " already exists.");
  }
}
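The queue region above is internal; the application-facing knobs that feed this code path live on GatewaySenderFactory. A hedged sketch with illustrative names (sender id, disk store name, remote distributed-system id); a real WAN setup would additionally need distributed-system ids and remote locators configured on both sites. The sender's maximum-queue-memory (in MB) is what becomes the LIFO-memory eviction limit of the queue region, with evicted events overflowing to the named disk store.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.wan.GatewaySender;
import org.apache.geode.cache.wan.GatewaySenderFactory;

public class SerialSenderQueueSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    cache.createDiskStoreFactory().create("senderOverflowStore"); // backs the queue's overflow

    GatewaySenderFactory gsf = cache.createGatewaySenderFactory();
    gsf.setMaximumQueueMemory(100);              // MB; becomes the LIFO-memory eviction limit above
    gsf.setDiskStoreName("senderOverflowStore");
    gsf.setPersistenceEnabled(false);            // overflow-only; persistence would write synchronously
    GatewaySender sender = gsf.create("serialSender", 2); // 2 = placeholder remote ds id

    Region<String, String> data = cache.<String, String>createRegionFactory(RegionShortcut.REPLICATE)
        .addGatewaySenderId(sender.getId())
        .create("dataRegion");

    cache.close();
  }
}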
Use of org.apache.geode.cache.EvictionAttributes in project geode by apache.
From the class ParallelGatewaySenderQueue, method addShadowPartitionedRegionForUserPR:
public void addShadowPartitionedRegionForUserPR(PartitionedRegion userPR) {
  if (logger.isDebugEnabled()) {
    logger.debug("{} addShadowPartitionedRegionForUserPR: Attempting to create queue region: {}",
        this, userPR.getDisplayName());
  }
  this.sender.getLifeCycleLock().writeLock().lock();
  PartitionedRegion prQ = null;
  try {
    String regionName = userPR.getFullPath();
    // Find out whether there is a parent region for this userPR;
    // if there is, there is no need to add another queue for the same region.
    String leaderRegionName = ColocationHelper.getLeaderRegion(userPR).getFullPath();
    if (!regionName.equals(leaderRegionName)) {
      // colocation chain
      if (!this.userRegionNameToshadowPRMap.containsKey(leaderRegionName)) {
        addShadowPartitionedRegionForUserPR(ColocationHelper.getLeaderRegion(userPR));
      }
      return;
    }
    if (this.userRegionNameToshadowPRMap.containsKey(regionName))
      return;
    if (userPR.getDataPolicy().withPersistence() && !sender.isPersistenceEnabled()) {
      throw new GatewaySenderException(
          LocalizedStrings.ParallelGatewaySenderQueue_NON_PERSISTENT_GATEWAY_SENDER_0_CAN_NOT_BE_ATTACHED_TO_PERSISTENT_REGION_1
              .toLocalizedString(new Object[] { this.sender.getId(), userPR.getFullPath() }));
    }
    InternalCache cache = sender.getCache();
    boolean isAccessor = (userPR.getLocalMaxMemory() == 0);
    final String prQName = sender.getId() + QSTRING + convertPathToName(userPR.getFullPath());
    prQ = (PartitionedRegion) cache.getRegion(prQName);
    if (prQ == null) {
      // TODO:REF:Avoid deprecated apis
      AttributesFactory fact = new AttributesFactory();
      fact.setConcurrencyChecksEnabled(false);
      PartitionAttributesFactory pfact = new PartitionAttributesFactory();
      pfact.setTotalNumBuckets(userPR.getTotalNumberOfBuckets());
      pfact.setRedundantCopies(userPR.getRedundantCopies());
      pfact.setColocatedWith(regionName);
      // EITHER set localMaxMemory to 0 for an accessor node
      // OR override the shadow PR's default local max memory with the sender's
      // maximum queue memory (fix for bug #44254)
      int localMaxMemory = isAccessor ? 0 : sender.getMaximumQueueMemory();
      pfact.setLocalMaxMemory(localMaxMemory);
      pfact.setStartupRecoveryDelay(userPR.getPartitionAttributes().getStartupRecoveryDelay());
      pfact.setRecoveryDelay(userPR.getPartitionAttributes().getRecoveryDelay());
      if (sender.isPersistenceEnabled() && !isAccessor) {
        fact.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
      }
      fact.setDiskStoreName(sender.getDiskStoreName());
      // If persistence is enabled, honor the sender's disk-synchronous setting;
      // otherwise write to disk asynchronously.
      if (sender.isPersistenceEnabled())
        fact.setDiskSynchronous(sender.isDiskSynchronous());
      else {
        fact.setDiskSynchronous(false);
      }
      // allow for no overflow directory
      EvictionAttributes ea = EvictionAttributes.createLIFOMemoryAttributes(
          sender.getMaximumQueueMemory(), EvictionAction.OVERFLOW_TO_DISK);
      fact.setEvictionAttributes(ea);
      fact.setPartitionAttributes(pfact.create());
      final RegionAttributes ra = fact.create();
      if (logger.isDebugEnabled()) {
        logger.debug("{}: Attempting to create queue region: {}", this, prQName);
      }
      ParallelGatewaySenderQueueMetaRegion meta =
          metaRegionFactory.newMetataRegion(cache, prQName, ra, sender);
      try {
        prQ = (PartitionedRegion) cache.createVMRegion(prQName, ra,
            new InternalRegionArguments().setInternalMetaRegion(meta).setDestroyLockFlag(true)
                .setInternalRegion(true).setSnapshotInputStream(null).setImageTarget(null));
        // at this point we should be able to assert prQ == meta;
        // TODO This should not be set on the PR but on the GatewaySender
        prQ.enableConflation(sender.isBatchConflationEnabled());
        if (isAccessor)
          // return from here if accessor node
          return;
        // Wait for buckets to be recovered.
        prQ.shadowPRWaitForBucketRecovery();
      } catch (IOException | ClassNotFoundException veryUnLikely) {
        logger.fatal(LocalizedMessage.create(
            LocalizedStrings.SingleWriteSingleReadRegionQueue_UNEXPECTED_EXCEPTION_DURING_INIT_OF_0,
            this.getClass()), veryUnLikely);
      }
      if (logger.isDebugEnabled()) {
        logger.debug("{}: Created queue region: {}", this, prQ);
      }
    } else {
      if (isAccessor)
        // return from here if accessor node
        return;
      // The shadow PR already exists (e.g. the sender was started again after a stop operation);
      // handle this for the first parallelGatewaySenderQueue only.
      if (this.index == 0)
        handleShadowPRExistsScenario(cache, prQ);
    }
  } finally {
    if (prQ != null) {
      this.userRegionNameToshadowPRMap.put(userPR.getFullPath(), prQ);
    }
    /*
     * Here, enqueueTempEvents needs to be invoked when a sender is already running and the userPR
     * is created later. When the flow comes here through the start() method of the sender, i.e.
     * the userPR already exists and the sender is started later, the enqueueTempEvents is done in
     * the start() method of ParallelGatewaySender.
     */
    if ((this.index == this.nDispatcher - 1) && this.sender.isRunning()) {
      ((AbstractGatewaySender) sender).enqueueTempEvents();
    }
    afterRegionAdd(userPR);
    this.sender.getLifeCycleLock().writeLock().unlock();
  }
}
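As with the serial case, the shadow partitioned region above is created internally when a parallel sender is attached to a user partitioned region. A hedged sketch of that user-facing configuration, with illustrative names and a placeholder remote distributed-system id; a real WAN setup would also need distributed-system ids and remote locators.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.wan.GatewaySender;
import org.apache.geode.cache.wan.GatewaySenderFactory;

public class ParallelSenderQueueSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();

    GatewaySenderFactory gsf = cache.createGatewaySenderFactory();
    gsf.setParallel(true);            // shadow PR is colocated with the user PR, as in the code above
    gsf.setMaximumQueueMemory(100);   // MB per member; feeds createLIFOMemoryAttributes above
    gsf.setPersistenceEnabled(false); // a persistent sender would also need a disk store
    GatewaySender sender = gsf.create("parallelSender", 2); // 2 = placeholder remote ds id

    Region<String, String> userPR = cache.<String, String>createRegionFactory(RegionShortcut.PARTITION)
        .addGatewaySenderId(sender.getId())
        .create("userPartitionedRegion");

    cache.close();
  }
}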
Use of org.apache.geode.cache.EvictionAttributes in project geode by apache.
From the class ParallelGatewaySenderQueue, method addShadowPartitionedRegionForUserRR:
public void addShadowPartitionedRegionForUserRR(DistributedRegion userRegion) {
  this.sender.getLifeCycleLock().writeLock().lock();
  PartitionedRegion prQ = null;
  if (logger.isDebugEnabled()) {
    logger.debug("addShadowPartitionedRegionForUserRR: Going to create shadowpr for userRegion {}",
        userRegion.getFullPath());
  }
  try {
    String regionName = userRegion.getFullPath();
    if (this.userRegionNameToshadowPRMap.containsKey(regionName))
      return;
    InternalCache cache = sender.getCache();
    final String prQName = getQueueName(sender.getId(), userRegion.getFullPath());
    prQ = (PartitionedRegion) cache.getRegion(prQName);
    if (prQ == null) {
      // TODO:REF:Avoid deprecated apis
      AttributesFactory fact = new AttributesFactory();
      // Fix for 48621 - don't enable concurrency checks for queue buckets,
      // even with persistence
      fact.setConcurrencyChecksEnabled(false);
      PartitionAttributesFactory pfact = new PartitionAttributesFactory();
      pfact.setTotalNumBuckets(sender.getMaxParallelismForReplicatedRegion());
      int localMaxMemory =
          userRegion.getDataPolicy().withStorage() ? sender.getMaximumQueueMemory() : 0;
      pfact.setLocalMaxMemory(localMaxMemory);
      // TODO: this needs to be handled more cleanly
      pfact.setRedundantCopies(3);
      pfact.setPartitionResolver(new RREventIDResolver());
      if (sender.isPersistenceEnabled()) {
        fact.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
      }
      fact.setDiskStoreName(sender.getDiskStoreName());
      // If persistence is enabled, honor the sender's disk-synchronous setting;
      // otherwise write to disk asynchronously (could be combined with the persistence check above).
      if (sender.isPersistenceEnabled())
        fact.setDiskSynchronous(sender.isDiskSynchronous());
      else {
        fact.setDiskSynchronous(false);
      }
      // allow for no overflow directory
      EvictionAttributes ea = EvictionAttributes.createLIFOMemoryAttributes(
          sender.getMaximumQueueMemory(), EvictionAction.OVERFLOW_TO_DISK);
      fact.setEvictionAttributes(ea);
      fact.setPartitionAttributes(pfact.create());
      final RegionAttributes ra = fact.create();
      if (logger.isDebugEnabled()) {
        logger.debug("{}: Attempting to create queue region: {}", this, prQName);
      }
      ParallelGatewaySenderQueueMetaRegion meta =
          new ParallelGatewaySenderQueueMetaRegion(prQName, ra, null, cache, sender);
      try {
        prQ = (PartitionedRegion) cache.createVMRegion(prQName, ra,
            new InternalRegionArguments().setInternalMetaRegion(meta).setDestroyLockFlag(true)
                .setSnapshotInputStream(null).setImageTarget(null));
        if (logger.isDebugEnabled()) {
          logger.debug("Region created : {} partition Attributes : {}", prQ,
              prQ.getPartitionAttributes());
        }
        // TODO This should not be set on the PR but on the GatewaySender
        prQ.enableConflation(sender.isBatchConflationEnabled());
        // This is required in case of a persistent PR and sender.
        if (prQ.getLocalMaxMemory() != 0) {
          Iterator<Integer> itr = prQ.getRegionAdvisor().getBucketSet().iterator();
          while (itr.hasNext()) {
            itr.next();
          }
        }
        // In case of a replicated region it may not be necessary.
      } catch (IOException veryUnLikely) {
        logger.fatal(LocalizedMessage.create(
            LocalizedStrings.SingleWriteSingleReadRegionQueue_UNEXPECTED_EXCEPTION_DURING_INIT_OF_0,
            this.getClass()), veryUnLikely);
      } catch (ClassNotFoundException alsoUnlikely) {
        logger.fatal(LocalizedMessage.create(
            LocalizedStrings.SingleWriteSingleReadRegionQueue_UNEXPECTED_EXCEPTION_DURING_INIT_OF_0,
            this.getClass()), alsoUnlikely);
      }
      if (logger.isDebugEnabled()) {
        logger.debug("{}: Created queue region: {}", this, prQ);
      }
    } else {
      // The shadow PR already exists (e.g. the sender was started again after a stop operation);
      // handle this for the first processor only.
      if (this.index == 0)
        handleShadowPRExistsScenario(cache, prQ);
    }
    /*
     * Here, enqueueTempEvents needs to be invoked when a sender is already running and the userPR
     * is created later. When the flow comes here through the start() method of the sender, i.e.
     * the userPR already exists and the sender is started later, the enqueueTempEvents is done in
     * the start() method of ParallelGatewaySender.
     */
    if ((this.index == this.nDispatcher - 1) && this.sender.isRunning()) {
      ((AbstractGatewaySender) sender).enqueueTempEvents();
    }
  } finally {
    if (prQ != null) {
      this.userRegionNameToshadowPRMap.put(userRegion.getFullPath(), prQ);
    }
    this.sender.getLifeCycleLock().writeLock().unlock();
  }
}
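Both shadow-queue variants overflow to the sender's named disk store once the memory limit is hit. The sketch below is illustrative only: the directory, store, and region names are made up, and the public LRU-memory factory method stands in for the internal LIFO variant used above. It shows the equivalent memory-based overflow configuration on an ordinary replicated region, with asynchronous disk writes as in the non-persistent queue case.

import java.io.File;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class OverflowToDiskSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();

    // Disk store backing the overflow files (the directory must exist).
    File dir = new File("overflow-dir");
    dir.mkdirs();
    DiskStore store = cache.createDiskStoreFactory()
        .setDiskDirs(new File[] { dir })
        .create("queueOverflowStore");

    // Evict by memory footprint (100 MB here) and spill evicted values to disk.
    Region<String, byte[]> region = cache.<String, byte[]>createRegionFactory(RegionShortcut.REPLICATE)
        .setEvictionAttributes(
            EvictionAttributes.createLRUMemoryAttributes(100, null, EvictionAction.OVERFLOW_TO_DISK))
        .setDiskStoreName(store.getName())
        .setDiskSynchronous(false)
        .create("overflowRegion");

    region.put("k1", new byte[4096]);

    cache.close();
  }
}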