Use of org.apache.geode.test.dunit.SerializableRunnable in project geode by apache.
The class Bug39356DUnitTest, method testCrashWhileCreatingABucket.
/**
 * This tests the case where the VM forcing other VMs to create a bucket crashes while creating
 * the bucket.
 */
@Test
public void testCrashWhileCreatingABucket() {
  Host host = Host.getHost(0);
  final VM vm0 = host.getVM(0);
  final VM vm1 = host.getVM(1);
  final VM vm2 = host.getVM(2);

  SerializableRunnable createParReg = new SerializableRunnable("Create parReg") {
    public void run() {
      DistributionMessageObserver.setInstance(new MyRegionObserver(vm0));
      Cache cache = getCache();
      AttributesFactory af = new AttributesFactory();
      PartitionAttributesFactory pf = new PartitionAttributesFactory();
      pf.setRedundantCopies(1);
      pf.setRecoveryDelay(0);
      af.setDataPolicy(DataPolicy.PARTITION);
      af.setPartitionAttributes(pf.create());
      cache.createRegion(REGION_NAME, af.create());
    }
  };
  vm1.invoke(createParReg);
  vm2.invoke(createParReg);

  SerializableRunnable createParRegAccessor = new SerializableRunnable("Create parReg") {
    public void run() {
      Cache cache = getCache();
      AttributesFactory af = new AttributesFactory();
      PartitionAttributesFactory pf = new PartitionAttributesFactory();
      pf.setRedundantCopies(1);
      pf.setLocalMaxMemory(0);
      af.setDataPolicy(DataPolicy.PARTITION);
      af.setPartitionAttributes(pf.create());
      Region r = cache.createRegion(REGION_NAME, af.create());
      // trigger the creation of a bucket, which should trigger the destruction of this VM.
      try {
        r.put("ping", "pong");
        fail("Should have gotten a CancelException");
      } catch (CancelException e) {
        // this is ok, we expect our observer to close this cache.
      }
    }
  };
  vm0.invoke(createParRegAccessor);

  SerializableRunnable verifyBuckets = new SerializableRunnable("Verify buckets") {
    public void run() {
      LogWriter log = org.apache.geode.test.dunit.LogWriterUtils.getLogWriter();
      Cache cache = getCache();
      PartitionedRegion r = (PartitionedRegion) cache.getRegion(REGION_NAME);
      for (int i = 0; i < r.getAttributes().getPartitionAttributes().getTotalNumBuckets(); i++) {
        List owners = null;
        while (owners == null) {
          try {
            owners = r.getBucketOwnersForValidation(i);
          } catch (ForceReattemptException e) {
            log.info(Bug39356DUnitTest.class + " verify buckets Caught a ForceReattemptException");
            Wait.pause(1000);
          }
        }
        if (owners.isEmpty()) {
          log.info("skipping bucket " + i + " because it has no data");
          continue;
        }
        assertEquals("Expecting bucket " + i + " to have two copies", 2, owners.size());
        log.info("bucket " + i + " had two copies");
      }
    }
  };
  vm1.invoke(verifyBuckets);
  vm2.invoke(verifyBuckets);
}
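The MyRegionObserver installed at the top of createParReg is defined elsewhere in Bug39356DUnitTest and is not shown in this listing. Below is a rough sketch of how such an observer could simulate the crash; the message type checked (ManageBucketMessage) and the disconnect mechanics are assumptions for illustration, not the test's actual implementation. It reuses the disconnect idiom that appears in the commented-out code later in this page.

// Hypothetical sketch only; the real MyRegionObserver may differ.
private static final class MyRegionObserver extends DistributionMessageObserver {
  private final VM vm0;

  MyRegionObserver(VM vm0) {
    this.vm0 = vm0;
  }

  @Override
  public void beforeProcessMessage(DistributionManager dm, DistributionMessage message) {
    // Assumption: the remote bucket-creation request arrives as a ManageBucketMessage.
    if (message instanceof ManageBucketMessage) {
      // Disconnect the requesting VM (vm0) before this member finishes creating the
      // bucket, so vm0's later put() fails with a CancelException.
      vm0.invoke(new SerializableRunnable("disconnect vm0") {
        public void run() {
          InternalDistributedSystem sys =
              (InternalDistributedSystem) InternalDistributedSystem.getAnyInstance();
          if (sys != null) {
            sys.disconnect();
          }
        }
      });
    }
  }
}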
Use of org.apache.geode.test.dunit.SerializableRunnable in project geode by apache.
The class TXLockServiceDUnitTest, method testTXOriginatorRecoveryProcessor.
@Test
public void testTXOriginatorRecoveryProcessor() {
  LogWriterUtils.getLogWriter().info("[testTXOriginatorRecoveryProcessor]");
  final int originatorVM = 0;
  final int grantorVM = 1;
  final int particpantA = 2;
  final int particpantB = 3;

  final List regionLockReqs = new ArrayList();
  regionLockReqs.add(new TXRegionLockRequestImpl("/testTXOriginatorRecoveryProcessor",
      new HashSet(Arrays.asList(new String[] { "KEY-1", "KEY-2", "KEY-3", "KEY-4" }))));

  // build participants set...
  InternalDistributedMember dmId = null;
  final Set participants = new HashSet();
  for (int i = 1; i <= particpantB; i++) {
    final int finalvm = i;
    dmId = (InternalDistributedMember) Host.getHost(0).getVM(finalvm)
        .invoke(() -> TXLockServiceDUnitTest.fetchDistributionManagerId());
    assertEquals("dmId should not be null for vm " + finalvm, false, dmId == null);
    participants.add(dmId);
  }

  // create grantor
  LogWriterUtils.getLogWriter().info("[testTXOriginatorRecoveryProcessor] grantorVM becomes grantor");
  Host.getHost(0).getVM(grantorVM).invoke(new SerializableRunnable() {
    public void run() {
      TXLockService.createDTLS();
    }
  });
  Host.getHost(0).getVM(grantorVM).invoke(() -> TXLockServiceDUnitTest.identifyLockGrantor_DTLS());
  Boolean isGrantor = (Boolean) Host.getHost(0).getVM(grantorVM)
      .invoke(() -> TXLockServiceDUnitTest.isLockGrantor_DTLS());
  assertEquals("isLockGrantor should not be false for DTLS", Boolean.TRUE, isGrantor);

  // have originatorVM get a txLock with three participants, including the grantor
  LogWriterUtils.getLogWriter().info("[testTXOriginatorRecoveryProcessor] originatorVM requests txLock");
  Host.getHost(0).getVM(originatorVM).invoke(new SerializableRunnable() {
    public void run() {
      TXLockService.createDTLS();
    }
  });
  Host.getHost(0).getVM(originatorVM).invoke(
      new SerializableRunnable("[testTXOriginatorRecoveryProcessor] originatorVM requests txLock") {
        public void run() {
          TXLockService dtls = TXLockService.getDTLS();
          testTXOriginatorRecoveryProcessor_TXLockId = dtls.txLock(regionLockReqs, participants);
          assertNotNull("testTXOriginatorRecoveryProcessor_TXLockId is null",
              testTXOriginatorRecoveryProcessor_TXLockId);
        }
      });

  // create dtls in each participant
  Host.getHost(0).getVM(particpantA).invoke(new SerializableRunnable() {
    public void run() {
      TXLockService.createDTLS();
    }
  });
  Host.getHost(0).getVM(particpantB).invoke(new SerializableRunnable() {
    public void run() {
      TXLockService.createDTLS();
    }
  });

  // disconnect originatorVM without releasing txLock
  /*
   * doesn't currently trigger the DLockLessorDepartureHandler... TODO
   * Host.getHost(0).getVM(originatorVM).invoke(new SerializableRunnable() { public void run() {
   * TXLockService.destroyServices(); } });
   */
  /*
   * Host.getHost(0).getVM(originatorVM).invoke(new SerializableRunnable() { public void run() {
   * InternalDistributedSystem sys = (InternalDistributedSystem)
   * InternalDistributedSystem.getAnyInstance(); if (sys != null) { sys.disconnect(); } } });
   */
  Host.getHost(0).getVM(originatorVM).invoke(new SerializableRunnable() {
    public void run() {
      TXLockService.destroyServices();
    }
  });
  Host.getHost(0).getVM(originatorVM).invoke(() -> disconnectFromDS());

  // grantor sends TXOriginatorRecoveryMessage...
  // TODO: verify processing of the message? and have the test sleep until finished
  sleep(200);

  // verify txLock is released...
  Host.getHost(0).getVM(particpantA).invoke(
      new SerializableRunnable("[testTXOriginatorRecoveryProcessor] verify txLock is released") {
        public void run() {
          TXLockService dtls = TXLockService.getDTLS();
          testTXOriginatorRecoveryProcessor_TXLockId = dtls.txLock(regionLockReqs, participants);
          assertNotNull("testTXOriginatorRecoveryProcessor_TXLockId is null",
              testTXOriginatorRecoveryProcessor_TXLockId);
        }
      });
  Host.getHost(0).getVM(particpantA).invoke(
      new SerializableRunnable("[testTXOriginatorRecoveryProcessor] particpantA releases txLock") {
        public void run() {
          TXLockService dtls = TXLockService.getDTLS();
          dtls.release(testTXOriginatorRecoveryProcessor_TXLockId);
        }
      });
}
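The static helpers invoked by method reference above (fetchDistributionManagerId, identifyLockGrantor_DTLS, isLockGrantor_DTLS) belong to TXLockServiceDUnitTest and are not part of this listing. A minimal sketch of the grantor check, assuming TXLockService exposes an isLockGrantor() query (the real helper may instead consult the underlying distributed lock service):

// Hypothetical sketch only; not the actual helper from TXLockServiceDUnitTest.
protected static Boolean isLockGrantor_DTLS() {
  TXLockService dtls = TXLockService.getDTLS();
  assertNotNull("dtls should not be null", dtls);
  // Assumption: TXLockService reports whether this member is the lock grantor.
  return Boolean.valueOf(dtls.isLockGrantor());
}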
Use of org.apache.geode.test.dunit.SerializableRunnable in project geode by apache.
The class TXLockServiceDUnitTest, method testTXLock.
@Test
public void testTXLock() {
  LogWriterUtils.getLogWriter().info("[testTXLock]");
  final int grantorVM = 0;
  final int clientA = 1;
  final int clientB = 2;
  final Set participants = Collections.EMPTY_SET;

  final List regionLockReqs = new ArrayList();
  regionLockReqs.add(new TXRegionLockRequestImpl("/testTXLock1",
      new HashSet(Arrays.asList(new String[] { "KEY-1", "KEY-2", "KEY-3", "KEY-4" }))));
  regionLockReqs.add(new TXRegionLockRequestImpl("/testTXLock2",
      new HashSet(Arrays.asList(new String[] { "KEY-A", "KEY-B", "KEY-C", "KEY-D" }))));

  // create grantor
  LogWriterUtils.getLogWriter().info("[testTXLock] create grantor");
  Host.getHost(0).getVM(grantorVM).invoke(new SerializableRunnable() {
    public void run() {
      TXLockService.createDTLS();
    }
  });
  sleep(20);

  // create client and request txLock
  LogWriterUtils.getLogWriter().info("[testTXLock] create clientA and request txLock");
  Host.getHost(0).getVM(clientA).invoke(new SerializableRunnable() {
    public void run() {
      TXLockService.createDTLS();
    }
  });
  Host.getHost(0).getVM(clientA).invoke(
      new SerializableRunnable("[testTXLock] create clientA and request txLock") {
        public void run() {
          TXLockService dtls = TXLockService.getDTLS();
          testTXLock_TXLockId = dtls.txLock(regionLockReqs, participants);
          assertNotNull("testTXLock_TXLockId is null", testTXLock_TXLockId);
        }
      });

  // create another client and request an overlapping txLock... verify that it fails
  LogWriterUtils.getLogWriter().info("[testTXLock] create clientB and fail txLock");
  Host.getHost(0).getVM(clientB).invoke(new SerializableRunnable() {
    public void run() {
      TXLockService.createDTLS();
    }
  });
  Host.getHost(0).getVM(clientB).invoke(new SerializableRunnable() {
    public void run() {
      try {
        TXLockService dtls = TXLockService.getDTLS();
        dtls.txLock(regionLockReqs, participants);
        fail("expected CommitConflictException");
      } catch (CommitConflictException expected) {
      }
    }
  });
  /*
   * try { Host.getHost(0).getVM(clientB).invoke(() -> TXLockServiceDUnitTest.txLock_DTLS(
   * regionLockReqs, participants )); fail("expected CommitConflictException"); } catch
   * (RMIException expected) { assertTrue(expected.getCause() instanceof CommitConflictException);
   * }
   */

  // release txLock
  LogWriterUtils.getLogWriter().info("[testTXLock] clientA releases txLock");
  Host.getHost(0).getVM(clientA).invoke(
      new SerializableRunnable("[testTXLock] clientA releases txLock") {
        public void run() {
          TXLockService dtls = TXLockService.getDTLS();
          dtls.release(testTXLock_TXLockId);
        }
      });
  sleep(20);

  // try the other client again and verify success
  LogWriterUtils.getLogWriter().info("[testTXLock] clientB requests txLock");
  Host.getHost(0).getVM(clientB).invoke(
      new SerializableRunnable("[testTXLock] clientB requests txLock") {
        public void run() {
          TXLockService dtls = TXLockService.getDTLS();
          testTXLock_TXLockId = dtls.txLock(regionLockReqs, participants);
          assertNotNull("testTXLock_TXLockId is null", testTXLock_TXLockId);
        }
      });

  // release txLock
  LogWriterUtils.getLogWriter().info("[testTXLock] clientB releases txLock");
  Host.getHost(0).getVM(clientB).invoke(
      new SerializableRunnable("[testTXLock] clientB releases txLock") {
        public void run() {
          TXLockService dtls = TXLockService.getDTLS();
          dtls.release(testTXLock_TXLockId);
        }
      });
}
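The commented-out alternative above routes clientB's conflicting request through a static txLock_DTLS helper and expects dunit to surface the remote CommitConflictException wrapped in an RMIException. A minimal sketch of such a helper, assuming it simply delegates to the DTLS in the remote VM (the listing does not show the real one):

// Hypothetical sketch only; the actual txLock_DTLS in TXLockServiceDUnitTest is not shown here.
protected static void txLock_DTLS(List regionLockReqs, Set participants) {
  TXLockService dtls = TXLockService.getDTLS();
  // A conflicting request throws CommitConflictException in this VM; dunit then
  // rethrows it to the calling VM wrapped in an RMIException.
  dtls.txLock(regionLockReqs, participants);
}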
Use of org.apache.geode.test.dunit.SerializableRunnable in project geode by apache.
The class TXLockServiceDUnitTest, method testDTLSIsDistributed.
@Test
public void testDTLSIsDistributed() {
  LogWriterUtils.getLogWriter().info("[testDTLSIsDistributed]");

  // in each VM: create the DTLS, verify it reports itself as distributed,
  // then lock and unlock a key
  final Host host = Host.getHost(0);
  int vmCount = host.getVMCount();
  for (int vm = 0; vm < vmCount; vm++) {
    final int finalvm = vm;
    LogWriterUtils.getLogWriter().info("[testDTLSIsDistributed] testing vm " + finalvm);
    Host.getHost(0).getVM(finalvm).invoke(new SerializableRunnable() {
      public void run() {
        TXLockService.createDTLS();
      }
    });

    // assert that isDistributed returns true
    Boolean isDistributed =
        (Boolean) host.getVM(finalvm).invoke(() -> TXLockServiceDUnitTest.isDistributed_DTLS());
    assertEquals("isDistributed should be true for DTLS", Boolean.TRUE, isDistributed);
    LogWriterUtils.getLogWriter().info("[testDTLSIsDistributed] isDistributed=" + isDistributed);

    // lock a key...
    Boolean gotLock =
        (Boolean) host.getVM(finalvm).invoke(() -> TXLockServiceDUnitTest.lock_DTLS("KEY"));
    assertEquals("gotLock is false after calling lock_DTLS", Boolean.TRUE, gotLock);
    LogWriterUtils.getLogWriter().info("[testDTLSIsDistributed] gotLock=" + gotLock);

    // unlock it...
    Boolean unlock =
        (Boolean) host.getVM(finalvm).invoke(() -> TXLockServiceDUnitTest.unlock_DTLS("KEY"));
    assertEquals("unlock is false after calling unlock_DTLS", Boolean.TRUE, unlock);
    LogWriterUtils.getLogWriter().info("[testDTLSIsDistributed] unlock=" + unlock);
  }
}
Use of org.apache.geode.test.dunit.SerializableRunnable in project geode by apache.
The class ShutdownAllDUnitTest, method getCreatePRRunnable.
private SerializableRunnable getCreatePRRunnable(final String regionName,
    final String diskStoreName, final int redundancy) {
  return new SerializableRunnable("create pr") {
    @Override
    public void run() {
      final CountDownLatch recoveryDone;
      if (redundancy > 0) {
        recoveryDone = new CountDownLatch(1);
        ResourceObserver observer = new InternalResourceManager.ResourceObserverAdapter() {
          @Override
          public void recoveryFinished(Region region) {
            recoveryDone.countDown();
          }
        };
        InternalResourceManager.setResourceObserver(observer);
      } else {
        recoveryDone = null;
      }

      Cache cache = ShutdownAllDUnitTest.this.getCache();
      if (diskStoreName != null) {
        DiskStore ds = cache.findDiskStore(diskStoreName);
        if (ds == null) {
          ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create(diskStoreName);
        }
      }

      AttributesFactory af = new AttributesFactory();
      // use async to trigger flush
      af.setDiskSynchronous(false);
      af.setEvictionAttributes(
          EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      paf.setRedundantCopies(redundancy);
      af.setPartitionAttributes(paf.create());
      if (diskStoreName != null) {
        af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
        af.setDiskStoreName(diskStoreName);
      } else {
        af.setDataPolicy(DataPolicy.PARTITION);
      }
      cache.createRegion(regionName, af.create());

      if (recoveryDone != null) {
        try {
          recoveryDone.await();
        } catch (InterruptedException e) {
          Assert.fail("Interrupted", e);
        }
      }
    }
  };
}
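For context, a runnable produced by this factory would be handed to dunit VMs with the same invoke pattern used throughout the tests above. A hypothetical call site (the region and disk store names are made up for illustration):

// Hypothetical usage sketch; names are illustrative only.
VM vm0 = Host.getHost(0).getVM(0);
VM vm1 = Host.getHost(0).getVM(1);
SerializableRunnable createPR = getCreatePRRunnable("testRegion", "testDiskStore", 1);
// Each invocation creates the persistent partitioned region in that member and, because
// redundancy > 0, run() waits on the CountDownLatch until the ResourceObserver reports
// recoveryFinished for the region.
vm0.invoke(createPR);
vm1.invoke(createPR);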