use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
the class PRQueryDUnitHelper method getCacheSerializableRunnableForPersistentPRCreate.
public CacheSerializableRunnable getCacheSerializableRunnableForPersistentPRCreate(
    final String regionName, final int redundancy, final Class constraint) {
  SerializableRunnable createPrRegion;
  createPrRegion = new CacheSerializableRunnable(regionName) {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region partitionedregion = null;
      try {
        cache.createDiskStoreFactory()
            .setDiskDirs(JUnit4CacheTestCase.getDiskDirs())
            .create("diskstore");
        AttributesFactory attr = new AttributesFactory();
        attr.setValueConstraint(constraint);
        attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        attr.setDiskStoreName("diskstore");
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        PartitionAttributes prAttr = paf.setRedundantCopies(redundancy).create();
        attr.setPartitionAttributes(prAttr);
        partitionedregion = cache.createRegion(regionName, attr.create());
      } catch (IllegalStateException ex) {
        org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().warning(
            "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Creation caught IllegalStateException",
            ex);
      }
      assertNotNull("PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region "
          + regionName + " not in cache", cache.getRegion(regionName));
      assertNotNull("PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region ref null",
          partitionedregion);
      assertTrue("PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region ref claims to be destroyed",
          !partitionedregion.isDestroyed());
    }
  };
  return (CacheSerializableRunnable) createPrRegion;
}
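For readers who want the same pattern outside the DUnit harness, here is a minimal standalone sketch of a persistent partitioned region backed by a named disk store. The region name, disk-store name, and disk directory are illustrative, and RegionShortcut.PARTITION_PERSISTENT stands in for the deprecated AttributesFactory calls the test helper uses.

import java.io.File;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.PartitionAttributes;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class PersistentPartitionedRegionSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();

    // Register the disk store that will hold the region's bucket data.
    File dir = new File("diskstore-dir");
    dir.mkdirs();
    cache.createDiskStoreFactory()
        .setDiskDirs(new File[] {dir})
        .create("diskstore");

    // One redundant copy of each bucket, as the helper does via setRedundantCopies.
    PartitionAttributes<Integer, String> prAttr =
        new PartitionAttributesFactory<Integer, String>()
            .setRedundantCopies(1)
            .create();

    Region<Integer, String> region = cache
        .<Integer, String>createRegionFactory(RegionShortcut.PARTITION_PERSISTENT)
        .setDiskStoreName("diskstore")
        .setPartitionAttributes(prAttr)
        .create("exampleRegion");

    region.put(1, "one"); // written to "diskstore" and recovered on restart
    cache.close();
  }
}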
use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
the class PRQueryDUnitTest method testDataLossDuringQueryProcessor.
/**
 * Test data loss (bucket 0) while the PRQueryEvaluator is processing the query loop.
 */
@Test
public void testDataLossDuringQueryProcessor() throws Exception {
  final String rName = getUniqueName();
  Host host = Host.getHost(0);
  final VM datastore1 = host.getVM(2);
  final VM datastore2 = host.getVM(3);
  final int totalBuckets = 11;
  final int redCop = 0;

  CacheSerializableRunnable createPR = new CacheSerializableRunnable("Create PR") {
    @Override
    public void run2() throws CacheException {
      AttributesFactory attr = new AttributesFactory();
      attr.setValueConstraint(String.class);
      PartitionAttributes prAttr = new PartitionAttributesFactory()
          .setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).create();
      attr.setPartitionAttributes(prAttr);
      getCache().createRegion(rName, attr.create());
    }
  };
  datastore1.invoke(createPR);
  datastore2.invoke(createPR);

  // This VM is an accessor (localMaxMemory == 0): it hosts no buckets itself.
  AttributesFactory attr = new AttributesFactory();
  attr.setValueConstraint(String.class);
  PartitionAttributes prAttr = new PartitionAttributesFactory()
      .setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets)
      .setLocalMaxMemory(0).create();
  attr.setPartitionAttributes(prAttr);
  PartitionedRegion pr = (PartitionedRegion) getCache().createRegion(rName, attr.create());

  // Create buckets zero, one, and two
  pr.put(new Integer(0), "zero");
  pr.put(new Integer(1), "one");
  pr.put(new Integer(2), "two");

  class MyTestHook implements PartitionedRegionQueryEvaluator.TestHook {
    public boolean done = false;

    public void hook(int spot) throws RuntimeException {
      if (spot == 4) {
        synchronized (this) {
          if (done) {
            return;
          }
          this.done = true;
        }
        // Simulate data loss mid-query by disconnecting both datastores.
        datastore1.invoke(disconnectVM());
        datastore2.invoke(disconnectVM());
      }
    }
  }
  final MyTestHook th = new MyTestHook();

  // add expected exception strings
  final IgnoredException ex = IgnoredException.addIgnoredException("Data loss detected");
  try {
    Object[] params = new Object[0];
    final DefaultQuery query = (DefaultQuery) getCache().getQueryService()
        .newQuery("select distinct * from " + pr.getFullPath());
    final SelectResults results =
        query.getSimpleSelect().getEmptyResultSet(params, getCache(), query);
    // TODO assert this is the correct set of bucket Ids
    final HashSet<Integer> buckets = new HashSet<Integer>();
    for (int i = 0; i < 3; i++) {
      buckets.add(new Integer(i));
    }
    PartitionedRegionQueryEvaluator qe = new PartitionedRegionQueryEvaluator(
        pr.getSystem(), pr, query, params, results, buckets);
    qe.queryBuckets(th);
    assertTrue(th.done);
    fail("Expected a QueryException once the datastores were disconnected");
  } catch (QueryException expected) {
    assertTrue(th.done);
  } finally {
    ex.remove();
    getCache().close();
  }
}
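The test's split between datastore and accessor members comes down to two PartitionAttributes configurations that differ only in localMaxMemory. A small helper makes the contrast explicit; the class and method names are illustrative, not part of the test above.

import org.apache.geode.cache.PartitionAttributes;
import org.apache.geode.cache.PartitionAttributesFactory;

public class AccessorVsDatastoreAttributes {
  // Every member hosting the same partitioned region must agree on the
  // bucket count; redundancy is 0 here, as in the test above.
  static final int TOTAL_BUCKETS = 11;

  static PartitionAttributes<Integer, String> datastoreAttributes() {
    return new PartitionAttributesFactory<Integer, String>()
        .setRedundantCopies(0)
        .setTotalNumBuckets(TOTAL_BUCKETS)
        .create();
  }

  static PartitionAttributes<Integer, String> accessorAttributes() {
    // localMaxMemory == 0 marks the member as an accessor: it can put and
    // query, but never hosts bucket data itself.
    return new PartitionAttributesFactory<Integer, String>()
        .setRedundantCopies(0)
        .setTotalNumBuckets(TOTAL_BUCKETS)
        .setLocalMaxMemory(0)
        .create();
  }
}

With zero redundant copies and an accessor that stores nothing, disconnecting the two datastores inside the hook destroys buckets 0 through 2 outright, which is exactly the data loss the query evaluator must detect.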
use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
the class ParallelGatewaySenderQueue method addShadowPartitionedRegionForUserPR.
public void addShadowPartitionedRegionForUserPR(PartitionedRegion userPR) {
  if (logger.isDebugEnabled()) {
    logger.debug("{} addShadowPartitionedRegionForUserPR: Attempting to create queue region: {}",
        this, userPR.getDisplayName());
  }
  this.sender.getLifeCycleLock().writeLock().lock();
  PartitionedRegion prQ = null;
  try {
    String regionName = userPR.getFullPath();
    // If this userPR has a parent in a colocation chain, one queue for the
    // leader region serves the whole chain; no need to add another.
    String leaderRegionName = ColocationHelper.getLeaderRegion(userPR).getFullPath();
    if (!regionName.equals(leaderRegionName)) {
      // colocation chain
      if (!this.userRegionNameToshadowPRMap.containsKey(leaderRegionName)) {
        addShadowPartitionedRegionForUserPR(ColocationHelper.getLeaderRegion(userPR));
      }
      return;
    }
    if (this.userRegionNameToshadowPRMap.containsKey(regionName)) {
      return;
    }
    if (userPR.getDataPolicy().withPersistence() && !sender.isPersistenceEnabled()) {
      throw new GatewaySenderException(
          LocalizedStrings.ParallelGatewaySenderQueue_NON_PERSISTENT_GATEWAY_SENDER_0_CAN_NOT_BE_ATTACHED_TO_PERSISTENT_REGION_1
              .toLocalizedString(new Object[] {this.sender.getId(), userPR.getFullPath()}));
    }
    InternalCache cache = sender.getCache();
    boolean isAccessor = (userPR.getLocalMaxMemory() == 0);
    final String prQName = sender.getId() + QSTRING + convertPathToName(userPR.getFullPath());
    prQ = (PartitionedRegion) cache.getRegion(prQName);
    if (prQ == null) {
      // TODO:REF:Avoid deprecated apis
      AttributesFactory fact = new AttributesFactory();
      fact.setConcurrencyChecksEnabled(false);
      PartitionAttributesFactory pfact = new PartitionAttributesFactory();
      pfact.setTotalNumBuckets(userPR.getTotalNumberOfBuckets());
      pfact.setRedundantCopies(userPR.getRedundantCopies());
      pfact.setColocatedWith(regionName);
      // EITHER set localMaxMemory to 0 for an accessor node OR override the
      // shadow PR's default local max memory with the sender's maximum queue
      // memory (fix for bug #44254)
      int localMaxMemory = isAccessor ? 0 : sender.getMaximumQueueMemory();
      pfact.setLocalMaxMemory(localMaxMemory);
      pfact.setStartupRecoveryDelay(userPR.getPartitionAttributes().getStartupRecoveryDelay());
      pfact.setRecoveryDelay(userPR.getPartitionAttributes().getRecoveryDelay());
      if (sender.isPersistenceEnabled() && !isAccessor) {
        fact.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
      }
      fact.setDiskStoreName(sender.getDiskStoreName());
      // If persistence is enabled, honor the sender's disk-synchronous
      // setting; otherwise force asynchronous writes.
      if (sender.isPersistenceEnabled()) {
        fact.setDiskSynchronous(sender.isDiskSynchronous());
      } else {
        fact.setDiskSynchronous(false);
      }
      // allow for no overflow directory
      EvictionAttributes ea = EvictionAttributes.createLIFOMemoryAttributes(
          sender.getMaximumQueueMemory(), EvictionAction.OVERFLOW_TO_DISK);
      fact.setEvictionAttributes(ea);
      fact.setPartitionAttributes(pfact.create());
      final RegionAttributes ra = fact.create();
      if (logger.isDebugEnabled()) {
        logger.debug("{}: Attempting to create queue region: {}", this, prQName);
      }
      ParallelGatewaySenderQueueMetaRegion meta =
          metaRegionFactory.newMetataRegion(cache, prQName, ra, sender);
      try {
        prQ = (PartitionedRegion) cache.createVMRegion(prQName, ra,
            new InternalRegionArguments().setInternalMetaRegion(meta).setDestroyLockFlag(true)
                .setInternalRegion(true).setSnapshotInputStream(null).setImageTarget(null));
        // at this point we should be able to assert prQ == meta
        // TODO This should not be set on the PR but on the GatewaySender
        prQ.enableConflation(sender.isBatchConflationEnabled());
        if (isAccessor) {
          // return from here if accessor node
          return;
        }
        // Wait for buckets to be recovered.
        prQ.shadowPRWaitForBucketRecovery();
      } catch (IOException | ClassNotFoundException veryUnlikely) {
        logger.fatal(LocalizedMessage.create(
            LocalizedStrings.SingleWriteSingleReadRegionQueue_UNEXPECTED_EXCEPTION_DURING_INIT_OF_0,
            this.getClass()), veryUnlikely);
      }
      if (logger.isDebugEnabled()) {
        logger.debug("{}: Created queue region: {}", this, prQ);
      }
    } else {
      if (isAccessor) {
        // return from here if accessor node
        return;
      }
      // The shadow PR already exists (e.g. the sender was restarted after a
      // stop operation); handle it for the first parallel queue only.
      if (this.index == 0) {
        handleShadowPRExistsScenario(cache, prQ);
      }
    }
  } finally {
    if (prQ != null) {
      this.userRegionNameToshadowPRMap.put(userPR.getFullPath(), prQ);
    }
    /*
     * enqueueTempEvents needs to be invoked when a sender is already running and the userPR is
     * created later. When the flow comes here through the sender's start() method (i.e. the
     * userPR already exists and the sender is started later), enqueueTempEvents is done in the
     * start() method of ParallelGatewaySender.
     */
    if ((this.index == this.nDispatcher - 1) && this.sender.isRunning()) {
      ((AbstractGatewaySender) sender).enqueueTempEvents();
    }
    afterRegionAdd(userPR);
    this.sender.getLifeCycleLock().writeLock().unlock();
  }
}
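The heart of the shadow-queue placement is setColocatedWith, which pins each shadow bucket to the member hosting the matching user bucket. Here is a minimal sketch of the same mechanism applied to two ordinary partitioned regions; the region names and bucket count are illustrative.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class ColocatedRegionSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();

    // The leader region fixes the bucket count and bucket placement.
    Region<String, String> orders = cache
        .<String, String>createRegionFactory(RegionShortcut.PARTITION)
        .setPartitionAttributes(new PartitionAttributesFactory<String, String>()
            .setTotalNumBuckets(113)
            .create())
        .create("orders");

    // The colocated region must match the leader's bucket count and
    // redundancy; each of its buckets is then hosted on the same member as
    // the corresponding leader bucket, exactly what the shadow queue region
    // relies on via setColocatedWith(userPR.getFullPath()).
    Region<String, String> shipments = cache
        .<String, String>createRegionFactory(RegionShortcut.PARTITION)
        .setPartitionAttributes(new PartitionAttributesFactory<String, String>()
            .setColocatedWith(orders.getFullPath())
            .setTotalNumBuckets(113)
            .create())
        .create("shipments");

    cache.close();
  }
}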
use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
the class ParallelGatewaySenderQueue method addShadowPartitionedRegionForUserRR.
public void addShadowPartitionedRegionForUserRR(DistributedRegion userRegion) {
  this.sender.getLifeCycleLock().writeLock().lock();
  PartitionedRegion prQ = null;
  if (logger.isDebugEnabled()) {
    logger.debug("addShadowPartitionedRegionForUserRR: Going to create shadow PR for userRegion {}",
        userRegion.getFullPath());
  }
  try {
    String regionName = userRegion.getFullPath();
    if (this.userRegionNameToshadowPRMap.containsKey(regionName)) {
      return;
    }
    InternalCache cache = sender.getCache();
    final String prQName = getQueueName(sender.getId(), userRegion.getFullPath());
    prQ = (PartitionedRegion) cache.getRegion(prQName);
    if (prQ == null) {
      // TODO:REF:Avoid deprecated apis
      AttributesFactory fact = new AttributesFactory();
      // Fix for 48621 - don't enable concurrency checks for queue buckets,
      // even with persistence
      fact.setConcurrencyChecksEnabled(false);
      PartitionAttributesFactory pfact = new PartitionAttributesFactory();
      pfact.setTotalNumBuckets(sender.getMaxParallelismForReplicatedRegion());
      int localMaxMemory =
          userRegion.getDataPolicy().withStorage() ? sender.getMaximumQueueMemory() : 0;
      pfact.setLocalMaxMemory(localMaxMemory);
      // TODO: the hard-coded redundancy needs to be handled more cleanly
      pfact.setRedundantCopies(3);
      pfact.setPartitionResolver(new RREventIDResolver());
      if (sender.isPersistenceEnabled()) {
        fact.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
      }
      fact.setDiskStoreName(sender.getDiskStoreName());
      // If persistence is enabled, honor the sender's disk-synchronous
      // setting; otherwise force asynchronous writes.
      if (sender.isPersistenceEnabled()) {
        fact.setDiskSynchronous(sender.isDiskSynchronous());
      } else {
        fact.setDiskSynchronous(false);
      }
      // allow for no overflow directory
      EvictionAttributes ea = EvictionAttributes.createLIFOMemoryAttributes(
          sender.getMaximumQueueMemory(), EvictionAction.OVERFLOW_TO_DISK);
      fact.setEvictionAttributes(ea);
      fact.setPartitionAttributes(pfact.create());
      final RegionAttributes ra = fact.create();
      if (logger.isDebugEnabled()) {
        logger.debug("{}: Attempting to create queue region: {}", this, prQName);
      }
      ParallelGatewaySenderQueueMetaRegion meta =
          new ParallelGatewaySenderQueueMetaRegion(prQName, ra, null, cache, sender);
      try {
        prQ = (PartitionedRegion) cache.createVMRegion(prQName, ra,
            new InternalRegionArguments().setInternalMetaRegion(meta).setDestroyLockFlag(true)
                .setSnapshotInputStream(null).setImageTarget(null));
        if (logger.isDebugEnabled()) {
          logger.debug("Region created : {} partition Attributes : {}", prQ,
              prQ.getPartitionAttributes());
        }
        // TODO This should not be set on the PR but on the GatewaySender
        prQ.enableConflation(sender.isBatchConflationEnabled());
        // Touch every bucket in the advisor's bucket set. This is required in
        // case of a persistent PR and sender; for a replicated user region it
        // may not be necessary.
        if (prQ.getLocalMaxMemory() != 0) {
          Iterator<Integer> itr = prQ.getRegionAdvisor().getBucketSet().iterator();
          while (itr.hasNext()) {
            itr.next();
          }
        }
      } catch (IOException | ClassNotFoundException veryUnlikely) {
        logger.fatal(LocalizedMessage.create(
            LocalizedStrings.SingleWriteSingleReadRegionQueue_UNEXPECTED_EXCEPTION_DURING_INIT_OF_0,
            this.getClass()), veryUnlikely);
      }
      if (logger.isDebugEnabled()) {
        logger.debug("{}: Created queue region: {}", this, prQ);
      }
    } else {
      // The shadow PR already exists (e.g. the sender was restarted after a
      // stop operation); handle it for the first processor only.
      if (this.index == 0) {
        handleShadowPRExistsScenario(cache, prQ);
      }
    }
    /*
     * enqueueTempEvents needs to be invoked when a sender is already running and the user region
     * is created later. When the flow comes here through the sender's start() method (i.e. the
     * user region already exists and the sender is started later), enqueueTempEvents is done in
     * the start() method of ParallelGatewaySender.
     */
    if ((this.index == this.nDispatcher - 1) && this.sender.isRunning()) {
      ((AbstractGatewaySender) sender).enqueueTempEvents();
    }
  } finally {
    if (prQ != null) {
      this.userRegionNameToshadowPRMap.put(userRegion.getFullPath(), prQ);
    }
    this.sender.getLifeCycleLock().writeLock().unlock();
  }
}
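For a replicated user region there is no user bucket to colocate with, so this method instead routes events into queue buckets with a custom PartitionResolver (RREventIDResolver above). A minimal sketch of the same mechanism with a hypothetical resolver that routes by a key prefix, so related keys land in the same bucket; the class name and prefix convention are illustrative.

import org.apache.geode.cache.EntryOperation;
import org.apache.geode.cache.PartitionAttributes;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.PartitionResolver;

public class KeyPrefixResolver implements PartitionResolver<String, Object> {

  // Route every entry by the prefix before the first ':' in the key, so all
  // entries sharing a prefix hash to the same bucket.
  @Override
  public Object getRoutingObject(EntryOperation<String, Object> opDetails) {
    String key = opDetails.getKey();
    int sep = key.indexOf(':');
    return sep < 0 ? key : key.substring(0, sep);
  }

  @Override
  public String getName() {
    return getClass().getSimpleName();
  }

  @Override
  public void close() {}

  // Attach the resolver the same way the queue code does via
  // pfact.setPartitionResolver(...).
  static PartitionAttributes<String, Object> attributes() {
    return new PartitionAttributesFactory<String, Object>()
        .setPartitionResolver(new KeyPrefixResolver())
        .setRedundantCopies(1)
        .create();
  }
}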
use of org.apache.geode.cache.PartitionAttributesFactory in project geode by apache.
the class TXJUnitTest method testInternalRegionNotExposed.
/**
 * Make sure that we do not expose BucketRegion in TransactionListener events.
 */
@Test
public void testInternalRegionNotExposed() throws Exception {
  TransactionListenerForRegionTest tl = new TransactionListenerForRegionTest();
  CacheTransactionManager ctm = this.cache.getCacheTransactionManager();
  ctm.addListener(tl);
  CacheListenerForRegionTest cl = new CacheListenerForRegionTest();
  AttributesFactory af = new AttributesFactory();
  PartitionAttributes pa = new PartitionAttributesFactory()
      .setRedundantCopies(0).setTotalNumBuckets(1).create();
  af.setPartitionAttributes(pa);
  af.addCacheListener(cl);
  Region pr = this.cache.createRegion("testTxEventForRegion", af.create());
  pr.put(2, "tw");
  pr.put(3, "three");
  pr.put(4, "four");
  ctm.begin();
  // Exercise create, update, invalidate, and destroy inside one transaction.
  pr.put(1, "one");
  pr.put(2, "two");
  pr.invalidate(3);
  pr.destroy(4);
  ctm.commit();
  assertFalse(tl.exceptionOccurred);
  assertFalse(cl.exceptionOccurred);
}
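TransactionListenerForRegionTest and CacheListenerForRegionTest are defined elsewhere in TXJUnitTest. As a hedged illustration of what such a check can look like, here is a hypothetical listener that flags commit events whose region name looks like an internal bucket region; the "_B__" name heuristic is an assumption for illustration, not a documented Geode API guarantee.

import org.apache.geode.cache.CacheEvent;
import org.apache.geode.cache.TransactionEvent;
import org.apache.geode.cache.util.TransactionListenerAdapter;

public class RegionCheckingTransactionListener extends TransactionListenerAdapter {
  volatile boolean sawInternalRegion = false;

  @Override
  public void afterCommit(TransactionEvent event) {
    for (CacheEvent<?, ?> e : event.getEvents()) {
      // Events on a partitioned region should report the PR itself, never an
      // internal BucketRegion (assumed here to carry "_B__" in its name).
      if (e.getRegion().getName().contains("_B__")) {
        sawInternalRegion = true;
      }
    }
  }
}

It would be registered the same way as the listener in the test:
cache.getCacheTransactionManager().addListener(new RegionCheckingTransactionListener());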