use of org.apache.geode.internal.cache.LocalRegion in project geode by apache.
the class HARQueueNewImplDUnitTest method verifyHaContainerType.
public static void verifyHaContainerType(Boolean isRegion, Integer port) {
  try {
    Map haMap = cache.getRegion(CacheServerImpl.generateNameForClientMsgsRegion(port.intValue()));
    if (isRegion.booleanValue()) {
      assertNotNull(haMap);
      assertTrue(haMap instanceof LocalRegion);
      haMap = ((CacheServerImpl) cache.getCacheServers().toArray()[0]).getAcceptor()
          .getCacheClientNotifier().getHaContainer();
      assertNotNull(haMap);
      assertTrue(haMap instanceof HAContainerRegion);
    } else {
      assertNull(haMap);
      haMap = ((CacheServerImpl) cache.getCacheServers().toArray()[0]).getAcceptor()
          .getCacheClientNotifier().getHaContainer();
      assertNotNull(haMap);
      assertTrue(haMap instanceof HAContainerMap);
    }
    logger.fine("haContainer: " + haMap);
  } catch (Exception e) {
    fail("failed in verifyHaContainerType()" + e);
  }
}
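Which branch of the assertions above applies depends on how the cache server's client subscription queue is configured: with overflow enabled, the HA container is backed by the client-messages region (HAContainerRegion); otherwise it is an in-memory HAContainerMap. The following is a minimal, hedged sketch of a server setup that would exercise the region-backed case; the class name, policy value, and capacity are illustrative assumptions, not taken from this test.

// Sketch only (assumed setup, not from HARQueueNewImplDUnitTest): start a cache server
// whose client subscription eviction policy overflows the HA queue, so that the
// client-messages region exists and the HA container is region-backed.
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.server.CacheServer;

public class HaContainerSetupSketch {

  public static int startServerWithSubscriptionOverflow(Cache cache) throws Exception {
    CacheServer server = cache.addCacheServer();
    server.setPort(0); // 0 lets the system pick a free port
    // "entry" (or "mem") overflows the client messages queue; "none" keeps it purely in memory.
    server.getClientSubscriptionConfig().setEvictionPolicy("entry");
    server.getClientSubscriptionConfig().setCapacity(1);
    server.start();
    return server.getPort();
  }
}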
use of org.apache.geode.internal.cache.LocalRegion in project geode by apache.
the class HAGIIDUnitTest method tombstonegc.
/**
 * Queue a tombstone GC message for the client. See bug #46832.
 */
public static void tombstonegc() throws Exception {
  LocalRegion r = (LocalRegion) cache.getRegion("/" + REGION_NAME);
  assertNotNull(r);
  DistributedMember id = r.getCache().getDistributedSystem().getDistributedMember();
  RegionEventImpl regionEvent = new RegionEventImpl(r, Operation.REGION_DESTROY, null, true, id);
  FilterInfo clientRouting = r.getFilterProfile().getLocalFilterRouting(regionEvent);
  assertTrue(clientRouting.getInterestedClients().size() > 0);
  regionEvent.setLocalFilterInfo(clientRouting);
  Map<VersionSource, Long> map = Collections.emptyMap();
  ClientTombstoneMessage message =
      ClientTombstoneMessage.gc(r, map, new EventID(r.getCache().getDistributedSystem()));
  CacheClientNotifier.notifyClients(regionEvent, message);
}
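Because tombstonegc() is a public static helper, a DUnit test would normally run it inside the server VM. The sketch below shows one hedged way to do that; the wrapper class and the serverVM parameter are assumptions for illustration, not code from HAGIIDUnitTest, and the sketch assumes it lives in the same package as that test.

// Hypothetical usage sketch: invoke the static tombstonegc() helper in the server VM.
import org.apache.geode.test.dunit.SerializableRunnable;
import org.apache.geode.test.dunit.VM;

final class TombstoneGcInvocationSketch {

  static void queueTombstoneGc(VM serverVM) {
    serverVM.invoke(new SerializableRunnable("queue tombstone GC") {
      @Override
      public void run() {
        try {
          HAGIIDUnitTest.tombstonegc();
        } catch (Exception e) {
          // tombstonegc() declares throws Exception; rethrow so the DUnit invoke fails the test.
          throw new RuntimeException("tombstone GC failed", e);
        }
      }
    });
  }
}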
use of org.apache.geode.internal.cache.LocalRegion in project geode by apache.
the class PersistentRVVRecoveryDUnitTest method testSkipConflictChecksForConcurrentOps.
/**
 * Test that we skip conflict checks with entries that are on disk compared to entries that come
 * in as part of a concurrent operation.
 */
@Test
public void testSkipConflictChecksForConcurrentOps() throws Throwable {
  Host host = Host.getHost(0);
  final VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  // Create the region in a few members to test recovery.
  createPersistentRegion(vm0);
  createPersistentRegion(vm1);
  // Create and update some entries in vm0 and vm1.
  createData(vm0, 0, 1, "value1");
  createData(vm0, 0, 1, "value2");
  createData(vm0, 0, 1, "value2");
  closeCache(vm1);
  // Update the key in vm0 and roll the entry version back. This means that if we did a
  // conflict check, vm0's key would have a lower entry version than vm1's, which would
  // cause us to prefer vm1's value.
  SerializableRunnable createData = new SerializableRunnable("rollEntryVersion") {
    public void run() {
      Cache cache = getCache();
      LocalRegion region = (LocalRegion) cache.getRegion(REGION_NAME);
      region.put(0, "value3");
      RegionEntry entry = region.getRegionEntry(0);
      entry = region.getRegionEntry(0);
      // Sneak in and change the version number for the entry to generate a conflict.
      VersionTag tag = entry.getVersionStamp().asVersionTag();
      tag.setEntryVersion(tag.getEntryVersion() - 2);
      entry.getVersionStamp().setVersions(tag);
    }
  };
  vm0.invoke(createData);
  // Add an observer to vm0 which will perform a concurrent operation during the GII.
  // If we did a conflict check, this operation would be rejected because it has a lower
  // entry version than what vm1 recovered from disk.
  vm0.invoke(new SerializableRunnable() {
    @Override
    public void run() {
      DistributionMessageObserver.setInstance(new DistributionMessageObserver() {
        @Override
        public void beforeProcessMessage(DistributionManager dm, DistributionMessage msg) {
          if (msg instanceof InitialImageOperation.RequestImageMessage) {
            if (((InitialImageOperation.RequestImageMessage) msg).regionPath.contains(REGION_NAME)) {
              createData(vm0, 0, 1, "value4");
              DistributionMessageObserver.setInstance(null);
            }
          }
        }
      });
    }
  });
  // Recreate the region in vm1, doing a GII.
  createPersistentRegion(vm1);
  // If we had done a conflict check, this would be value2.
  checkData(vm0, 0, 1, "value4");
  checkData(vm1, 0, 1, "value4");
}
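The test relies on createData and checkData helpers whose bodies are not shown in this excerpt. Below is a plausible sketch of what they do, assuming they are members of the test class (so getCache(), REGION_NAME, and the JUnit assertions are in scope) and that keys in [start, end) are simply put and then read back; the real implementations may differ in detail.

// Plausible sketch of the createData/checkData helpers used above; not the actual
// implementations from PersistentRVVRecoveryDUnitTest.
private void createData(VM vm, final int start, final int end, final String value) {
  vm.invoke(new SerializableRunnable("createData") {
    @Override
    public void run() {
      Region<Object, Object> region = getCache().getRegion(REGION_NAME);
      for (int i = start; i < end; i++) {
        region.put(i, value);
      }
    }
  });
}

private void checkData(VM vm, final int start, final int end, final String expectedValue) {
  vm.invoke(new SerializableRunnable("checkData") {
    @Override
    public void run() {
      Region<Object, Object> region = getCache().getRegion(REGION_NAME);
      for (int i = start; i < end; i++) {
        assertEquals(expectedValue, region.get(i));
      }
    }
  });
}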
use of org.apache.geode.internal.cache.LocalRegion in project geode by apache.
the class PersistentRVVRecoveryDUnitTest method getRVV.
protected RegionVersionVector getRVV(VM vm) throws IOException, ClassNotFoundException {
  SerializableCallable createData = new SerializableCallable("getRVV") {
    public Object call() throws Exception {
      Cache cache = getCache();
      LocalRegion region = (LocalRegion) cache.getRegion(REGION_NAME);
      RegionVersionVector rvv = region.getVersionVector();
      rvv = rvv.getCloneForTransmission();
      HeapDataOutputStream hdos = new HeapDataOutputStream(Version.CURRENT);
      // Using GemFire serialization because RegionVersionVector is not java serializable.
      DataSerializer.writeObject(rvv, hdos);
      return hdos.toByteArray();
    }
  };
  byte[] result = (byte[]) vm.invoke(createData);
  ByteArrayInputStream bais = new ByteArrayInputStream(result);
  return DataSerializer.readObject(new DataInputStream(bais));
}
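The serialize-then-deserialize pattern in getRVV() can be factored into a small round-trip helper. This is a minimal sketch that reuses only the calls already visible above (DataSerializer plus HeapDataOutputStream); the helper class itself is illustrative and not part of the test.

// Minimal sketch: round-trip a Geode-serializable object through a byte array,
// mirroring how getRVV() ships the RegionVersionVector between VMs.
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

import org.apache.geode.DataSerializer;
import org.apache.geode.internal.HeapDataOutputStream;
import org.apache.geode.internal.Version;

final class GeodeSerializationSketch {

  @SuppressWarnings("unchecked")
  static <T> T roundTrip(T value) throws IOException, ClassNotFoundException {
    HeapDataOutputStream hdos = new HeapDataOutputStream(Version.CURRENT);
    DataSerializer.writeObject(value, hdos);
    byte[] bytes = hdos.toByteArray();
    return (T) DataSerializer.readObject(new DataInputStream(new ByteArrayInputStream(bytes)));
  }
}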
use of org.apache.geode.internal.cache.LocalRegion in project geode by apache.
the class PersistentRVVRecoveryDUnitTest method testConflictChecksDuringConcurrentDeltaGIIAndOtherOp.
/**
 * This test creates 2 VMs in a distributed system with a persistent PartitionedRegion. One VM
 * (VM1) puts an entry in the region. The second VM (VM2) starts later and does a delta GII.
 * During the delta GII in VM2, a DESTROY operation happens in VM1 and gets propagated to VM2
 * concurrently with the GII. At this point, if the entry version is greater than the one received
 * from the GII, it must not get applied. This is bug #45921.
 */
@Test
public void testConflictChecksDuringConcurrentDeltaGIIAndOtherOp() {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  vm0.invoke(new CacheSerializableRunnable("Create PR and put an entry") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      PartitionAttributes attrs =
          new PartitionAttributesFactory().setRedundantCopies(1).setTotalNumBuckets(1).create();
      AttributesFactory factory = new AttributesFactory();
      factory.setPartitionAttributes(attrs);
      RegionAttributes rAttrs = factory.create();
      Region region = cache.createRegionFactory(rAttrs).create("prRegion");
      region.put("testKey", "testValue");
      assertEquals(1, region.size());
    }
  });
  // Create a cache and region, do an update to change the version number, and then
  // restart the cache and region.
  vm1.invoke(new CacheSerializableRunnable("Create PR and put an entry") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      PartitionAttributes attrs =
          new PartitionAttributesFactory().setRedundantCopies(1).setTotalNumBuckets(1).create();
      AttributesFactory factory = new AttributesFactory();
      factory.setPartitionAttributes(attrs);
      RegionAttributes rAttrs = factory.create();
      Region region = cache.createRegionFactory(rAttrs).create("prRegion");
      region.put("testKey", "testValue2");
      cache.close();
      // Restart
      cache = getCache();
      region = cache.createRegionFactory(rAttrs).create("prRegion");
    }
  });
  // Do a DESTROY in vm0 while the delta GII is in progress in vm1 (hopefully; this is
  // not guaranteed).
  AsyncInvocation async = vm0.invokeAsync(new CacheSerializableRunnable("Destroy entry in region") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region region = cache.getRegion("prRegion");
      while (!region.get("testKey").equals("testValue2")) {
        Wait.pause(100);
      }
      region.destroy("testKey");
    }
  });
  try {
    async.join(3000);
  } catch (InterruptedException e) {
    throw new AssertionError("vm0 entry destroy did not finish in 3000 ms", e);
  }
  vm1.invoke(new CacheSerializableRunnable("Verifying entry version in new node VM1") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region region = cache.getRegion("prRegion");
      Region.Entry entry = ((PartitionedRegion) region).getEntry("testKey", true);
      RegionEntry re = ((EntrySnapshot) entry).getRegionEntry();
      LogWriterUtils.getLogWriter().fine(
          "RegionEntry for testKey: " + re.getKey() + " " + re.getValueInVM((LocalRegion) region));
      assertTrue(re.getValueInVM((LocalRegion) region) instanceof Tombstone);
      VersionTag tag = re.getVersionStamp().asVersionTag();
      assertEquals(3, tag.getEntryVersion()); // two puts and a destroy
    }
  });
  closeCache(vm0);
  closeCache(vm1);
}
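The destroy runnable above polls region.get in a Wait.pause loop until the value "testValue2" is visible. Geode tests often express this kind of wait with Awaitility instead; below is a hedged sketch of that alternative, assuming Awaitility is on the test classpath (the class and method names are illustrative, not from this test).

// Sketch only: wait for the entry to reach the expected value before destroying it,
// using Awaitility instead of a hand-rolled Wait.pause loop.
import static org.awaitility.Awaitility.await;

import java.util.concurrent.TimeUnit;

import org.apache.geode.cache.Region;

final class AwaitThenDestroySketch {

  static void destroyWhenUpdated(Region<String, String> region) {
    await().atMost(30, TimeUnit.SECONDS)
        .until(() -> "testValue2".equals(region.get("testKey")));
    region.destroy("testKey");
  }
}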