Use of org.apache.geode.LogWriter in project geode by apache.
In class MultiVMRegionTestCase, the method testNBRegionInvalidationDuringGetInitialImage:
@Ignore("TODO: test is disabled for 51542")
@Test
public void testNBRegionInvalidationDuringGetInitialImage() throws Exception {
// Verifies non-blocking getInitialImage (GII): while vm2 is receiving the
// initial image of a replicated region from vm0, vm0 concurrently performs
// puts, invalidates, destroys, and one region-wide invalidation. Afterwards
// vm2's copy must reflect those concurrent operations (checked by the
// verification runnables at the bottom of this method).
assumeTrue(supportsReplication());
disconnectAllFromDS();
// don't run this for noAck, too many race conditions
if (getRegionAttributes().getScope().isDistributedNoAck())
return;
final String name = this.getUniqueName();
// Pre-build identical byte[] payloads for the initial entries.
final byte[][] values = new byte[NB1_NUM_ENTRIES][];
for (int i = 0; i < NB1_NUM_ENTRIES; i++) {
values[i] = new byte[NB1_VALUE_SIZE];
Arrays.fill(values[i], (byte) 0x42);
}
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm2 = host.getVM(2);
// Runnable executed later (asynchronously) in vm2: declares the mirrored
// REPLICATE region via cache.xml, which kicks off the GII transfer.
SerializableRunnable create = new CacheSerializableRunnable("Create Mirrored Region") {
@Override
public void run2() throws CacheException {
beginCacheXml();
{
// root region must be DACK because its used to sync up async subregions
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setDataPolicy(DataPolicy.NORMAL);
factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
createRootRegion(factory.create());
}
{
AttributesFactory factory = new AttributesFactory(getRegionAttributes());
factory.setDataPolicy(DataPolicy.REPLICATE);
createRegion(name, factory.create());
}
finishCacheXml(name);
// reset slow
org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
}
};
// vm0 hosts the source data: an EMPTY root region (used only for flushes)
// plus the REPLICATE test region that vm2 will image from.
vm0.invoke(new CacheSerializableRunnable("Create Nonmirrored Region") {
@Override
public void run2() throws CacheException {
{
// root region must be DACK because its used to sync up async subregions
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setDataPolicy(DataPolicy.EMPTY);
createRootRegion(factory.create());
}
{
AttributesFactory factory = new AttributesFactory(getRegionAttributes());
factory.setDataPolicy(DataPolicy.REPLICATE);
createRegion(name, factory.create());
}
// reset slow
org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
}
});
// Seed vm0 with NB1_NUM_ENTRIES entries before the GII starts.
vm0.invoke(new CacheSerializableRunnable("Put initial data") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
for (int i = 0; i < NB1_NUM_ENTRIES; i++) {
region.put(new Integer(i), values[i]);
}
assertEquals(NB1_NUM_ENTRIES, region.keySet().size());
}
});
// attachDebugger(vm0, "vm0");
// attachDebugger(vm2, "vm2");
// start asynchronous process that does updates to the data
AsyncInvocation async = vm0.invokeAsync(new CacheSerializableRunnable("Do Nonblocking Operations") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
// wait for profile of getInitialImage cache to show up
final org.apache.geode.internal.cache.CacheDistributionAdvisor adv = ((org.apache.geode.internal.cache.DistributedRegion) region).getCacheDistributionAdvisor();
final int expectedProfiles = 1;
WaitCriterion ev = new WaitCriterion() {
@Override
public boolean done() {
// vm2's replicate shows up in the advisor once its region is created
return adv.adviseReplicates().size() >= expectedProfiles;
}
@Override
public String description() {
return "profile count never reached " + expectedProfiles;
}
};
Wait.waitForCriterion(ev, 30 * 1000, 200, true);
// before the get initial image is complete.
// Operate only on odd keys; i % 6 selects update / invalidate / destroy.
for (int i = 1; i < NB1_NUM_ENTRIES; i += 2) {
// as before
if (i == 301) {
// DebuggerSupport.waitForJavaDebugger(getLogWriter(), "About to invalidate
// region");
// wait for previous updates to be processed
flushIfNecessary(region);
// Region-wide invalidation issued mid-GII; the verification below
// expects every surviving entry with key < 301 to have a null value.
region.invalidateRegion();
flushIfNecessary(region);
}
Object key = new Integer(i);
switch(i % 6) {
case // UPDATE
1:
// use the current timestamp so we know when it happened
// we could have used last modification timestamps, but
// this works without enabling statistics
Object value = new Long(System.currentTimeMillis());
region.put(key, value);
// }
break;
case // INVALIDATE
3:
region.invalidate(key);
if (getRegionAttributes().getScope().isDistributedAck()) {
// do a nonblocking netSearch
value = region.get(key);
assertNull("Expected null value for key: " + i + " but got " + value, value);
}
break;
case // DESTROY
5:
region.destroy(key);
if (getRegionAttributes().getScope().isDistributedAck()) {
// do a nonblocking netSearch
assertNull(region.get(key));
}
break;
default:
fail("unexpected modulus result: " + i);
break;
}
}
// now do a put and our DACK root region which will not complete
// until processed on otherside which means everything done before this
// point has been processed
getRootRegion().put("DONE", "FLUSH_OPS");
}
});
// slow down image processing to make it more likely to get async updates
if (!getRegionAttributes().getScope().isGlobal()) {
vm2.invoke(new SerializableRunnable("Set slow image processing") {
@Override
public void run() {
// make sure the cache is set up before turning on slow
// image processing
getRootRegion();
// if this is a no_ack test, then we need to slow down more because of the
// pauses in the nonblocking operations
int pause = /* getRegionAttributes().getScope().isAck() ? */
100;
org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = pause;
}
});
}
// Start the GII in vm2 while the nonblocking ops in vm0 are still running.
AsyncInvocation asyncGII = vm2.invokeAsync(create);
if (!getRegionAttributes().getScope().isGlobal()) {
// wait for nonblocking operations to complete
try {
ThreadUtils.join(async, 30 * 1000);
} finally {
// Always restore normal image-processing speed, even if the join fails.
vm2.invoke(new SerializableRunnable("Set fast image processing") {
@Override
public void run() {
org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
}
});
}
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("after async nonblocking ops complete");
}
// wait for GII to complete
ThreadUtils.join(asyncGII, 30 * 1000);
// Timestamp used below to count how many updates landed during the GII.
final long iiComplete = System.currentTimeMillis();
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("Complete GetInitialImage at: " + System.currentTimeMillis());
if (getRegionAttributes().getScope().isGlobal()) {
// wait for nonblocking operations to complete
ThreadUtils.join(async, 30 * 1000);
}
// Surface failures from either async invocation before verifying state.
if (asyncGII.exceptionOccurred()) {
throw new Error("asyncGII failed", asyncGII.getException());
}
if (async.exceptionOccurred()) {
throw new Error("async failed", async.getException());
}
// Locally destroy the region in vm0 so we know that they are not found by
// a netSearch
vm0.invoke(new CacheSerializableRunnable("Locally destroy region") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
region.localDestroyRegion();
}
});
// invoke repeating so noack regions wait for all updates to get processed
vm2.invokeRepeatingIfNecessary(new CacheSerializableRunnable("Verify entryCount") {
private boolean entriesDumped = false;
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
// expected entry count (subtract entries destroyed)
int entryCount = NB1_NUM_ENTRIES - NB1_NUM_ENTRIES / 6;
int actualCount = region.entrySet(false).size();
if (actualCount == NB1_NUM_ENTRIES) {
// entries not destroyed, dump entries that were supposed to have been destroyed
dumpDestroyedEntries(region);
}
assertEquals(entryCount, actualCount);
}
// Diagnostic helper: logs the in-VM value of every key that the
// nonblocking ops should have destroyed (i % 6 == 5). Runs at most once.
private void dumpDestroyedEntries(Region region) throws EntryNotFoundException {
if (entriesDumped)
return;
entriesDumped = true;
LogWriter logger = org.apache.geode.test.dunit.LogWriterUtils.getLogWriter();
logger.info("DUMPING Entries with values in VM that should have been destroyed:");
for (int i = 5; i < NB1_NUM_ENTRIES; i += 6) {
logger.info(i + "-->" + ((org.apache.geode.internal.cache.LocalRegion) region).getValueInVM(new Integer(i)));
}
}
}, 3000);
// Final verification in vm2: per-key expectations derived from i % 6,
// split at key 301 (everything before it was covered by invalidateRegion).
vm2.invoke(new CacheSerializableRunnable("Verify keys/values & Nonblocking") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
// expected entry count (subtract entries destroyed)
int entryCount = NB1_NUM_ENTRIES - NB1_NUM_ENTRIES / 6;
assertEquals(entryCount, region.entrySet(false).size());
// determine how many entries were updated before getInitialImage
// was complete
int numConcurrent = 0;
for (int i = 0; i < NB1_NUM_ENTRIES; i++) {
Region.Entry entry = region.getEntry(new Integer(i));
if (i < 301) {
if (i % 6 == 5) {
// destroyed
assertNull("Expected entry for " + i + " to be destroyed but it is " + entry, entry);
} else {
// survived the region-wide invalidation, so value must be null
assertNotNull(entry);
Object v = entry.getValue();
assertNull("Expected value for " + i + " to be null, but was " + v, v);
}
} else {
Object v = entry == null ? null : entry.getValue();
switch(i % 6) {
// even keys are originals
case 0:
case 2:
case 4:
assertNotNull(entry);
assertNull("Expected value for " + i + " to be null, but was " + v, v);
break;
case // updated
1:
assertNotNull("Expected to find an entry for #" + i, entry);
assertNotNull("Expected to find a value for #" + i, v);
assertTrue("Value for key " + i + " is not a Long, is a " + v.getClass().getName(), v instanceof Long);
// Updates are timestamped puts; one older than iiComplete ran
// concurrently with the image transfer.
Long timestamp = (Long) entry.getValue();
if (timestamp.longValue() < iiComplete) {
numConcurrent++;
}
break;
case // invalidated
3:
assertNotNull("Expected to find an entry for #" + i, entry);
assertNull("Expected value for " + i + " to be null, but was " + v, v);
break;
case // destroyed
5:
assertNull("Expected to not find an entry for #" + i, entry);
break;
default:
fail("unexpected modulus result: " + (i % 6));
break;
}
}
}
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info(name + ": " + numConcurrent + " entries out of " + entryCount + " were updated concurrently with getInitialImage");
// make sure at least some of them were concurrent
if (getRegionAttributes().getScope().isGlobal()) {
assertTrue("Too many concurrent updates when expected to block: " + numConcurrent, numConcurrent < 300);
} else {
assertTrue("Not enough updates concurrent with getInitialImage occurred to my liking. " + numConcurrent + " entries out of " + entryCount + " were updated concurrently with getInitialImage, and I'd expect at least 50 or so", numConcurrent >= 30);
}
}
});
}
Use of org.apache.geode.LogWriter in project geode by apache.
In class SystemFailureDUnitTest, the method doCreateEntry:
protected void doCreateEntry(String name) {
  // Invokes "createEntry" in VM 0, tolerating the RMIException that the
  // system-failure scenario is expected to produce. The ExpectedException
  // markers tell the log scanner to ignore that exception for the duration
  // of this call only.
  LogWriter log = org.apache.geode.test.dunit.LogWriterUtils.getLogWriter();
  log.info("<ExpectedException action=add>" + "dunit.RMIException" + "</ExpectedException>");
  Object[] args = new Object[] { name };
  Host host = Host.getHost(0);
  VM vm = host.getVM(0);
  try {
    vm.invoke(this.getClass(), "createEntry", args);
  } catch (RMIException e) {
    // expected
  }
  // BUG FIX: the closing marker must use action=remove (the original logged
  // action=add again, which left dunit.RMIException suppressed for the
  // remainder of the test run).
  log.info("<ExpectedException action=remove>" + "dunit.RMIException" + "</ExpectedException>");
}
Use of org.apache.geode.LogWriter in project geode by apache.
In class TestFunction, the method execute5:
public void execute5(FunctionContext context) {
  // Logs the invocation, then (only when this function declares a result)
  // replies "Success" if the arguments are a String and "Failure" otherwise.
  DistributedSystem ds = InternalDistributedSystem.getAnyInstance();
  LogWriter logger = ds.getLogWriter();
  logger.info("Executing executeException in TestFunction on Member : " + ds.getDistributedMember() + "with Context : " + context);
  if (!this.hasResult()) {
    return;
  }
  String outcome = context.getArguments() instanceof String ? "Success" : "Failure";
  context.getResultSender().lastResult(outcome);
}
Use of org.apache.geode.LogWriter in project geode by apache.
In class FireAndForgetFunctionOnAllServers, the method execute:
@Override
public void execute(FunctionContext context) {
  // Looks up the region named by the context arguments (creating a REPLICATE
  // region on demand), records this member's id in it, and logs the call.
  DistributedSystem ds = InternalDistributedSystem.getAnyInstance();
  LogWriter logger = ds.getLogWriter();
  Cache cache = CacheFactory.getAnyInstance();
  String regionName = (String) context.getArguments();
  Region<String, Integer> region = cache.getRegion(regionName);
  if (region == null) {
    // Region does not exist yet on this member; create it lazily.
    RegionFactory<String, Integer> regionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE);
    region = regionFactory.create(regionName);
  }
  region.put(ds.getDistributedMember().toString(), 1);
  logger.info("Executing FireAndForgetFunctionOnAllServers on Member : " + ds.getDistributedMember() + " with Context : " + context);
  if (!hasResult()) {
    // Fire-and-forget: nothing to send back.
    return;
  }
}
Use of org.apache.geode.LogWriter in project geode by apache.
In class TestFunction, the method executeWithThrowException:
private void executeWithThrowException(FunctionContext context) {
  // Exercises exception paths of function execution:
  //  - Boolean arguments: always throw MyFunctionExecutionException.
  //  - Set arguments: throw if any of the given keys has a non-null value in
  //    the local data view of this region function context.
  //  - otherwise: send Boolean.FALSE as the last (only) result.
  DistributedSystem ds = InternalDistributedSystem.getAnyInstance();
  RegionFunctionContext rfContext = (RegionFunctionContext) context;
  LogWriter logger = ds.getLogWriter();
  logger.fine("Executing executeWithThrowException in TestFunction on Member : " + ds.getDistributedMember() + "with Context : " + context);
  if (context.getArguments() instanceof Boolean) {
    logger.fine("MyFunctionExecutionException Exception is intentionally thrown");
    throw new MyFunctionExecutionException("I have been thrown from TestFunction");
  } else if (rfContext.getArguments() instanceof Set) {
    Set origKeys = (Set) rfContext.getArguments();
    // The local-data view does not depend on the key, so resolve it once
    // instead of re-fetching it on every loop iteration (loop-invariant hoist).
    Region r = PartitionRegionHelper.getLocalDataForContext(rfContext);
    for (Iterator i = origKeys.iterator(); i.hasNext(); ) {
      Object key = i.next();
      Object val = r.get(key);
      if (val != null) {
        throw new MyFunctionExecutionException("I have been thrown from TestFunction");
      }
    }
  } else {
    logger.fine("Result sent back :" + Boolean.FALSE);
    rfContext.getResultSender().lastResult(Boolean.FALSE);
  }
}
Aggregations