use of org.apache.geode.distributed.internal.locks.DLockService in project geode by apache.
the class TXLockServiceDUnitTest method isDistributed_DTLS.
/**
* Accessed via reflection. DO NOT REMOVE
*/
protected static Boolean isDistributed_DTLS() {
  TXLockService dtls = TXLockService.getDTLS();
  DLockService svc = ((TXLockServiceImpl) dtls).getInternalDistributedLockService();
  assertNotNull(svc);
  assertEquals("DTLS InternalDistributedLockService should not be destroyed", false, svc.isDestroyed());
  boolean isDistributed = svc.isDistributed();
  DLockService service = DLockService.getInternalServiceNamed(svc.getName());
  assertNotNull(service);
  assertEquals("DTLS and DLock should both report same isDistributed result", isDistributed,
      service.isDistributed());
  Boolean result = Boolean.valueOf(isDistributed);
  logInfo("isDistributed_DTLS (hopefully true): " + result);
  return result;
}
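For context, the method reaches the internal DLockService through the TXLockServiceImpl accessor and cross-checks it against the name-based registry lookup. A minimal sketch of that same lookup chain outside the test harness, assuming the helper name and the null handling as illustrative additions (TXLockService.getDTLS() returns null until the distributed transaction lock service has been created in a member):

import org.apache.geode.distributed.internal.locks.DLockService;
import org.apache.geode.internal.cache.locks.TXLockService;
import org.apache.geode.internal.cache.locks.TXLockServiceImpl;

public class DtlsLookupSketch {
  // Hypothetical helper: resolve the DLockService behind the DTLS and
  // report whether it is distributed; false if the DTLS does not exist yet.
  static boolean dtlsIsDistributed() {
    TXLockService dtls = TXLockService.getDTLS();
    if (dtls == null) {
      return false; // DTLS is created lazily per member
    }
    DLockService svc = ((TXLockServiceImpl) dtls).getInternalDistributedLockService();
    // The registry lookup by name should resolve to the same underlying service.
    DLockService byName = DLockService.getInternalServiceNamed(svc.getName());
    return byName != null && byName.isDistributed();
  }
}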
use of org.apache.geode.distributed.internal.locks.DLockService in project geode by apache.
the class TXDistributedDUnitTest method testLockBatchParticipantsUpdate.
/**
 * Test update of lock batch participants (needed when new members are discovered between a
 * commit's locking phase and the application of the Region's data). See bug 32999.
 */
@Test
public void testLockBatchParticipantsUpdate() throws Exception {
  final String rgnName = getUniqueName();
  Region rgn = getCache().createRegion(rgnName, getRegionAttributes());
  rgn.create("key", null);
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  VM vm2 = host.getVM(2);
  SerializableRunnable initRegions =
      new SerializableRunnable("testLockBatchParticipantsUpdate: initial configuration") {
        public void run() {
          try {
            Region rgn1 = getCache().createRegion(rgnName, getRegionAttributes());
            rgn1.create("key", null);
          } catch (CacheException e) {
            Assert.fail("While creating region", e);
          }
        }
      };
  vm0.invoke(initRegions);
  vm1.invoke(initRegions);
  rgn.put("key", "val1");
  // Connect vm2 also since it may have been shut down when logPerTest
  // is turned on
  vm2.invoke(new SerializableRunnable("connect vm2 if not connected") {
    public void run() {
      getCache();
    }
  });
  // Make VM0 the Grantor
  vm0.invoke(new SerializableRunnable("testLockBatchParticipantsUpdate: remote grantor init") {
    public void run() {
      try {
        Region rgn1 = getCache().getRegion(rgnName);
        final CacheTransactionManager txMgr2 = getCache().getCacheTransactionManager();
        assertEquals("val1", rgn1.getEntry("key").getValue());
        txMgr2.begin();
        rgn1.put("key", "val2");
        txMgr2.commit();
        assertNotNull(TXLockService.getDTLS());
        assertTrue(TXLockService.getDTLS().isLockGrantor());
      } catch (CacheException e) {
        Assert.fail("While performing first transaction", e);
      }
    }
  });
  // fix for bug 38843 causes the DTLS to be created in every TX participant
  assertNotNull(TXLockService.getDTLS());
  assertFalse(TXLockService.getDTLS().isLockGrantor());
  assertEquals("val2", rgn.getEntry("key").getValue());
  // Build sets of System Ids and set them up on VM0 for future batch member checks
  HashSet txMembers = new HashSet(4);
  txMembers.add(getSystemId());
  txMembers.add(vm0.invoke(() -> TXDistributedDUnitTest.getSystemId()));
  vm0.invoke(() -> TXDistributedDUnitTest.setPreTXSystemIds(txMembers));
  txMembers.add(vm2.invoke(() -> TXDistributedDUnitTest.getSystemId()));
  vm0.invoke(() -> TXDistributedDUnitTest.setPostTXSystemIds(txMembers));
  // Don't include the tx host in the batch member set(s)
  Serializable vm1HostId = (Serializable) vm1.invoke(() -> TXDistributedDUnitTest.getSystemId());
  vm0.invoke(() -> TXDistributedDUnitTest.setTXHostSystemId(vm1HostId));
  // Create a TX on VM1 (such that it will ask for locks on VM0) that uses the callbacks
  // to pause and give us time to start a GII process on another VM
  vm1.invoke(new SerializableRunnable(
      "testLockBatchParticipantsUpdate: slow tx (one that detects new member)") {
    public void run() {
      // fix for bug 38843 causes the DTLS to be created in every TX participant
      assertNotNull(TXLockService.getDTLS());
      assertFalse(TXLockService.getDTLS().isLockGrantor());
      PausibleTX pauseTXRunnable = new PausibleTX();
      pauseTXRunnable.rgnName = rgnName;
      pauseTXRunnable.myCache = getCache();
      pauseTXRunnable.key = "key";
      pauseTXRunnable.value = "val3";
      new Thread(pauseTXRunnable, "PausibleTX Thread").start();
      synchronized (PausibleTX.class) {
        while (!pauseTXRunnable.getIsRunning()) {
          try {
            PausibleTX.class.wait();
          } catch (InterruptedException ie) {
            fail("Did not expect " + ie);
          }
        }
      }
    }
  });
  // Verify that the lock batch exists on VM0 and has the size we expect
  vm0.invoke(new SerializableRunnable(
      "testLockBatchParticipantsUpdate: Verify lock batch exists on VM0 with expected size") {
    public void run() {
      getCache().getRegion(rgnName);
      TXLockServiceImpl dtls = (TXLockServiceImpl) TXLockService.getDTLS();
      assertNotNull(dtls);
      assertTrue(dtls.isLockGrantor());
      DLockService dLockSvc = dtls.getInternalDistributedLockService();
      assertNotNull(TXDistributedDUnitTest.txHostId);
      DLockBatch[] batches = dLockSvc.getGrantor()
          .getLockBatches((InternalDistributedMember) TXDistributedDUnitTest.txHostId);
      assertEquals(1, batches.length);
      TXLockBatch txLockBatch = (TXLockBatch) batches[0];
      assertNotNull(txLockBatch);
      assertNotNull(TXDistributedDUnitTest.preTXSystemIds);
      assertTrue(
          "Members in lock batch " + txLockBatch.getParticipants() + " not the same as "
              + TXDistributedDUnitTest.preTXSystemIds,
          txLockBatch.getParticipants().equals(TXDistributedDUnitTest.preTXSystemIds));
    }
  });
  // Start a GII process on VM2
  vm2.invoke(new SerializableRunnable("testLockBatchParticipantsUpdate: start GII") {
    public void run() {
      try {
        AttributesFactory factory = new AttributesFactory();
        factory.setScope(Scope.DISTRIBUTED_ACK);
        factory.setEarlyAck(false);
        factory.setDataPolicy(DataPolicy.REPLICATE);
        getCache().createRegion(rgnName, factory.create());
      } catch (CacheException e) {
        Assert.fail("While creating region", e);
      }
    }
  });
  // Notify TX on VM1 so that it can continue
  vm1.invoke(new SerializableRunnable(
      "testLockBatchParticipantsUpdate: Notify VM1 TX to continue") {
    public void run() {
      synchronized (PausibleTX.class) {
        // Notify VM1 that it should proceed to the TX send
        PausibleTX.class.notifyAll();
        // Wait until VM1 has sent the TX
        try {
          PausibleTX.class.wait();
        } catch (InterruptedException ie) {
          fail("Did not expect " + ie);
        }
      }
    }
  });
  // Verify that the batch on VM0 has added VM2 into the set
  vm0.invoke(new SerializableRunnable(
      "testLockBatchParticipantsUpdate: Verify lock batch contains VM2") {
    public void run() {
      getCache().getRegion(rgnName);
      TXLockServiceImpl dtls = (TXLockServiceImpl) TXLockService.getDTLS();
      assertNotNull(dtls);
      assertTrue(dtls.isLockGrantor());
      DLockService dLockSvc = dtls.getInternalDistributedLockService();
      assertNotNull(TXDistributedDUnitTest.txHostId);
      DLockBatch[] batches = dLockSvc.getGrantor()
          .getLockBatches((InternalDistributedMember) TXDistributedDUnitTest.txHostId);
      assertEquals(1, batches.length);
      TXLockBatch txLockBatch = (TXLockBatch) batches[0];
      assertNotNull(txLockBatch);
      assertNotNull(TXDistributedDUnitTest.postTXSystemIds);
      assertTrue(
          "Members in lock batch " + txLockBatch.getParticipants() + " not the same as "
              + TXDistributedDUnitTest.postTXSystemIds,
          txLockBatch.getParticipants().equals(TXDistributedDUnitTest.postTXSystemIds));
    }
  });
  // fix for bug 38843 causes the DTLS to be created in every TX participant
  assertNotNull(TXLockService.getDTLS());
  assertFalse(TXLockService.getDTLS().isLockGrantor());
  assertEquals("val3", rgn.getEntry("key").getValue());
  // Notify TX on VM1 that it can go ahead and complete the TX
  vm1.invoke(new SerializableRunnable("testLockBatchParticipantsUpdate: Notify VM1 TX to finish") {
    public void run() {
      synchronized (PausibleTX.class) {
        // Notify VM1 that it should finish the TX
        PausibleTX.class.notifyAll();
      }
    }
  });
  rgn.destroyRegion();
}
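The grantor-side verification above reduces to one lookup chain: the DTLS, its internal DLockService, the grantor, and the pending lock batches for a given member. A condensed sketch of that chain, assuming the local member is the grantor (the helper is hypothetical, and these are internal Geode APIs whose signatures may shift between versions):

import java.util.Set;
import org.apache.geode.distributed.internal.locks.DLockBatch;
import org.apache.geode.distributed.internal.locks.DLockService;
import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
import org.apache.geode.internal.cache.locks.TXLockBatch;
import org.apache.geode.internal.cache.locks.TXLockService;
import org.apache.geode.internal.cache.locks.TXLockServiceImpl;

public class LockBatchSketch {
  // Hypothetical helper: participants of the first lock batch that
  // 'txHost' currently holds on the local grantor.
  static Set batchParticipantsFor(InternalDistributedMember txHost) {
    TXLockServiceImpl dtls = (TXLockServiceImpl) TXLockService.getDTLS();
    DLockService dLockSvc = dtls.getInternalDistributedLockService();
    DLockBatch[] batches = dLockSvc.getGrantor().getLockBatches(txHost);
    // Each in-flight transaction commit appears here as one TXLockBatch.
    return ((TXLockBatch) batches[0]).getParticipants();
  }
}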
use of org.apache.geode.distributed.internal.locks.DLockService in project geode by apache.
the class DistributedLockServiceDUnitTest method becomeLockGrantor.
/**
* Accessed via reflection. DO NOT REMOVE.
*
* @param serviceName the name of the lock service that should become grantor
*/
protected static void becomeLockGrantor(String serviceName) {
  DLockService service = (DLockService) DistributedLockService.getServiceNamed(serviceName);
  assertNotNull(service);
  logInfo("About to call becomeLockGrantor...");
  service.becomeLockGrantor();
}
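The downcast to DLockService is only needed for the assertion and logging; grantor transfer itself is part of the public DistributedLockService API. A minimal sketch of the public-API equivalent (the wrapper class and service name are illustrative):

import org.apache.geode.distributed.DistributedLockService;

public class BecomeGrantorSketch {
  // Ask this member to take over grantor duties for an existing service.
  static void takeOverGrantor(String serviceName) {
    DistributedLockService svc = DistributedLockService.getServiceNamed(serviceName);
    if (svc != null) {
      svc.becomeLockGrantor(); // returns once this member holds grantor duties
    }
  }
}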
use of org.apache.geode.distributed.internal.locks.DLockService in project geode by apache.
the class DistributedLockServiceDUnitTest method testLockQuery.
@Test
public void testLockQuery() throws Exception {
  final String dlsName = getUniqueName();
  final VM vmGrantor = Host.getHost(0).getVM(0);
  final VM vm1 = Host.getHost(0).getVM(1);
  final VM vm2 = Host.getHost(0).getVM(2);
  final String key1 = "key1";
  // vmGrantor creates grantor
  vmGrantor.invoke(new SerializableRunnable() {
    public void run() {
      LogWriterUtils.getLogWriter().info("[testLockQuery] vmGrantor creates grantor");
      connectDistributedSystem();
      DLockService dls = (DLockService) DistributedLockService.create(dlsName, getSystem());
      assertTrue(dls.lock(key1, -1, -1));
      assertTrue(dls.isLockGrantor());
      dls.unlock(key1);
      dls.freeResources(key1);
    }
  });
  AsyncInvocation whileVM1Locks = null;
  try {
    // vm1 locks key1
    whileVM1Locks = vm1.invokeAsync(new SerializableRunnable() {
      public void run() {
        LogWriterUtils.getLogWriter().info("[testLockQuery] vm1 locks key1");
        connectDistributedSystem();
        DLockService dls = (DLockService) DistributedLockService.create(dlsName, getSystem());
        assertTrue(dls.lock(key1, -1, -1));
        assertFalse(dls.isLockGrantor());
        try {
          synchronized (testLockQuery_whileVM1Locks) {
            testLockQuery_whileVM1Locks.set(true);
            testLockQuery_whileVM1Locks.notifyAll();
            long maxWait = 10000;
            StopWatch timer = new StopWatch(true);
            while (testLockQuery_whileVM1Locks.get()) {
              // while true
              long timeLeft = maxWait - timer.elapsedTimeMillis();
              if (timeLeft > 0) {
                testLockQuery_whileVM1Locks.wait(timeLeft);
              } else {
                fail("Test attempted to wait too long");
              }
            }
          }
        } catch (InterruptedException e) {
          org.apache.geode.test.dunit.Assert.fail(e.getMessage(), e);
        }
        LogWriterUtils.getLogWriter().info("[testLockQuery] vm1 unlocks key1");
        dls.unlock(key1);
        dls.freeResources(key1);
      }
    });
    // wait for vm1 to set testLockQuery_whileVM1Locks
    // get DistributedMember for vm1
    final DistributedMember vm1Member = (DistributedMember) vm1.invoke(new SerializableCallable() {
      public Object call() throws Exception {
        LogWriterUtils.getLogWriter().info("[testLockQuery] vm1 waits for locking thread");
        synchronized (testLockQuery_whileVM1Locks) {
          long maxWait = 10000;
          StopWatch timer = new StopWatch(true);
          while (!testLockQuery_whileVM1Locks.get()) {
            // while false
            long timeLeft = maxWait - timer.elapsedTimeMillis();
            if (timeLeft > 0) {
              testLockQuery_whileVM1Locks.wait(timeLeft);
            } else {
              fail("Test attempted to wait too long");
            }
          }
        }
        return getSystem().getDistributedMember();
      }
    });
    assertNotNull(vm1Member);
    // vmGrantor tests positive local dlock query
    vmGrantor.invoke(new SerializableRunnable() {
      public void run() {
        LogWriterUtils.getLogWriter().info("[testLockQuery] vmGrantor tests local query");
        DLockService dls = (DLockService) DistributedLockService.getServiceNamed(dlsName);
        DLockRemoteToken result = dls.queryLock(key1);
        assertNotNull(result);
        assertEquals(key1, result.getName());
        assertTrue(result.getLeaseId() != -1);
        assertEquals(Long.MAX_VALUE, result.getLeaseExpireTime());
        RemoteThread lesseeThread = result.getLesseeThread();
        assertNotNull(lesseeThread);
        assertEquals(vm1Member, lesseeThread.getDistributedMember());
        assertEquals(vm1Member, result.getLessee());
        // nothing to test for on threadId unless we serialize info from vm1
      }
    });
    // vm2 tests positive remote dlock query
    vm2.invoke(new SerializableRunnable() {
      public void run() {
        LogWriterUtils.getLogWriter().info("[testLockQuery] vm2 tests remote query");
        connectDistributedSystem();
        DLockService dls = (DLockService) DistributedLockService.create(dlsName, getSystem());
        DLockRemoteToken result = dls.queryLock(key1);
        assertNotNull(result);
        assertEquals(key1, result.getName());
        assertTrue(result.getLeaseId() != -1);
        assertEquals(Long.MAX_VALUE, result.getLeaseExpireTime());
        RemoteThread lesseeThread = result.getLesseeThread();
        assertNotNull(lesseeThread);
        assertEquals(vm1Member, lesseeThread.getDistributedMember());
        assertEquals(vm1Member, result.getLessee());
        // nothing to test for on threadId unless we serialize info from vm1
      }
    });
  } finally {
    // guarantee that testLockQuery_whileVM1Locks is notified!
    // vm1 sets and notifies testLockQuery_whileVM1Locks to release the lock
    vm1.invoke(new SerializableRunnable() {
      public void run() {
        LogWriterUtils.getLogWriter().info("[testLockQuery] vm1 notifies/releases key1");
        synchronized (testLockQuery_whileVM1Locks) {
          testLockQuery_whileVM1Locks.set(false);
          testLockQuery_whileVM1Locks.notifyAll();
        }
      }
    });
    ThreadUtils.join(whileVM1Locks, 10 * 1000);
    if (whileVM1Locks.exceptionOccurred()) {
      org.apache.geode.test.dunit.Assert.fail("Test failed", whileVM1Locks.getException());
    }
  }
  // vmGrantor tests negative local dlock query
  vmGrantor.invoke(new SerializableRunnable() {
    public void run() {
      LogWriterUtils.getLogWriter().info("[testLockQuery] vmGrantor tests negative query");
      DLockService dls = (DLockService) DistributedLockService.getServiceNamed(dlsName);
      DLockRemoteToken result = dls.queryLock(key1);
      assertNotNull(result);
      assertEquals(key1, result.getName());
      assertEquals(-1, result.getLeaseId());
      assertEquals(0, result.getLeaseExpireTime());
      assertNull(result.getLesseeThread());
      assertNull(result.getLessee());
    }
  });
  // vm2 tests negative remote dlock query
  vm2.invoke(new SerializableRunnable() {
    public void run() {
      LogWriterUtils.getLogWriter().info("[testLockQuery] vm2 tests negative query");
      DLockService dls = (DLockService) DistributedLockService.getServiceNamed(dlsName);
      DLockRemoteToken result = dls.queryLock(key1);
      assertNotNull(result);
      assertEquals(key1, result.getName());
      assertEquals(-1, result.getLeaseId());
      assertEquals(0, result.getLeaseExpireTime());
      assertNull(result.getLesseeThread());
      assertNull(result.getLessee());
    }
  });
}
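queryLock() lives on the internal DLockService rather than the public DistributedLockService, which is why every query in this test downcasts. A small sketch of reading a lease that way, assuming the service already exists in this member (the helper class and name are illustrative; a leaseId of -1 denotes no current lessee, matching the negative-path assertions above):

import org.apache.geode.distributed.DistributedLockService;
import org.apache.geode.distributed.internal.locks.DLockRemoteToken;
import org.apache.geode.distributed.internal.locks.DLockService;

public class LockQuerySketch {
  // Hypothetical helper: true if 'key' is currently leased in the named service.
  static boolean isLeased(String serviceName, Object key) {
    DLockService dls = (DLockService) DistributedLockService.getServiceNamed(serviceName);
    DLockRemoteToken token = dls.queryLock(key);
    return token.getLeaseId() != -1; // -1 means nobody holds the lock
  }
}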
use of org.apache.geode.distributed.internal.locks.DLockService in project geode by apache.
the class DistributedLockServiceDUnitTest method testSuspendLockingProhibitsLocking.
/**
 * Test that exclusive locking prohibits other locking activity
 */
@Test
public void testSuspendLockingProhibitsLocking() {
  final String name = getUniqueName();
  distributedCreateService(2, name);
  DistributedLockService service = DistributedLockService.getServiceNamed(name);
  // Should be able to lock from other VM
  VM vm1 = Host.getHost(0).getVM(1);
  assertTrue(vm1.invoke(() -> DistributedLockServiceDUnitTest.tryToLock(name)));
  assertTrue(service.suspendLocking(1000));
  // vm1 is the grantor... use debugHandleSuspendTimeouts
  vm1.invoke(new SerializableRunnable("setDebugHandleSuspendTimeouts") {
    public void run() {
      DLockService dls = (DLockService) DistributedLockService.getServiceNamed(name);
      assertTrue(dls.isLockGrantor());
      DLockGrantor grantor = dls.getGrantorWithNoSync();
      grantor.setDebugHandleSuspendTimeouts(5000);
    }
  });
  // Shouldn't be able to lock a name from another VM
  assertFalse(vm1.invoke(() -> DistributedLockServiceDUnitTest.tryToLock(name)));
  service.resumeLocking();
  vm1.invoke(new SerializableRunnable("unsetDebugHandleSuspendTimeouts") {
    public void run() {
      DLockService dls = (DLockService) DistributedLockService.getServiceNamed(name);
      assertTrue(dls.isLockGrantor());
      DLockGrantor grantor = dls.getGrantorWithNoSync();
      grantor.setDebugHandleSuspendTimeouts(0);
    }
  });
  // Should be able to lock again
  assertTrue(vm1.invoke(() -> DistributedLockServiceDUnitTest.tryToLock(name)));
}
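Setting the debug suspend-timeout hooks aside, the suspend/resume contract this test exercises is public API: while one thread holds the suspension, no other thread or member may acquire locks in that service. A minimal usage sketch (the wrapper class and one-second timeout are illustrative):

import org.apache.geode.distributed.DistributedLockService;

public class SuspendLockingSketch {
  static void withAllLockingSuspended(DistributedLockService svc, Runnable work) {
    // Wait up to one second to suspend all locking in this service.
    if (svc.suspendLocking(1000)) {
      try {
        work.run(); // runs while all other lock requests are blocked
      } finally {
        svc.resumeLocking(); // always release the suspension
      }
    }
  }
}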