Use of org.apache.geode.distributed.internal.locks.DLockService in project Geode by Apache.
The class PartitionedRepositoryManagerJUnitTest, method setUp.
@Before
public void setUp() {
  cache = Fakes.cache();
  userRegion = Mockito.mock(PartitionedRegion.class);
  userDataStore = Mockito.mock(PartitionedRegionDataStore.class);
  when(userRegion.getDataStore()).thenReturn(userDataStore);
  when(cache.getRegion("/testRegion")).thenReturn(userRegion);
  serializer = new HeterogeneousLuceneSerializer(new String[] {"a", "b"});
  DLockService lockService = mock(DLockService.class);
  when(lockService.lock(any(), anyLong(), anyLong())).thenReturn(true);
  DLockService.addLockServiceForTests(PartitionedRegionHelper.PARTITION_LOCK_SERVICE_NAME,
      lockService);
  createIndexAndRepoManager();
}
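A brief aside on the pattern above: registering a mocked DLockService lets the test bypass real distributed locking entirely. As a minimal sketch (hedged: the variable name and the negative-path assertion below are illustrative and not part of the Geode test; the same static Mockito/JUnit imports as in setUp() are assumed), the same stubbing can model a lock request that is denied, so timeout or retry branches can be exercised:

// Illustrative only: stub a DLockService whose lock() calls fail, rather than succeed.
DLockService deniedLockService = mock(DLockService.class);
when(deniedLockService.lock(any(), anyLong(), anyLong())).thenReturn(false);
// Any code under test that asks this service for the partition lock now sees the
// request fail instead of blocking on a real grantor.
assertFalse(deniedLockService.lock("testResource", 100L, -1L));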
Use of org.apache.geode.distributed.internal.locks.DLockService in project Geode by Apache.
The class TXLockServiceDUnitTest, method checkDLockRecoverGrantorMessageProcessor.
// private synchronized void assertGrantorIsConsistent(Serializable id) {
//   if (this.lockGrantor == null) {
//     this.lockGrantor = id;
//   } else {
//     assertEquals("assertGrantorIsConsistent failed", lockGrantor, id);
//   }
// }

// private void distributedCreateService(int numVMs, String serviceName) {
//   forEachVMInvoke("remoteCreateService", new Object[] {serviceName});
//   remoteCreateService(serviceName);
// }

private void checkDLockRecoverGrantorMessageProcessor() {
  /*
   * Simple test to make sure getDLockRecoverGrantorMessageProcessor returns an instance of
   * TXRecoverGrantorMessageProcessor.
   */
  DLockService dlock = null;
  TXLockServiceImpl dtls = (TXLockServiceImpl) TXLockService.getDTLS();
  assertNotNull("DTLS should not be null", dtls);
  dlock = dtls.getInternalDistributedLockService();
  assertEquals("DTLS should use TXRecoverGrantorMessageProcessor", true,
      dlock.getDLockRecoverGrantorMessageProcessor() instanceof TXRecoverGrantorMessageProcessor);
}
Use of org.apache.geode.distributed.internal.locks.DLockService in project Geode by Apache.
The class TXLockServiceDUnitTest, method isDistributed_DTLS.
/**
* Accessed via reflection. DO NOT REMOVE
*/
protected static Boolean isDistributed_DTLS() {
TXLockService dtls = TXLockService.getDTLS();
boolean isDistributed = ((TXLockServiceImpl) dtls).getInternalDistributedLockService().isDistributed();
DLockService svc = ((TXLockServiceImpl) dtls).getInternalDistributedLockService();
assertNotNull(svc);
assertEquals("DTLS InternalDistributedLockService should not be destroyed", false, svc.isDestroyed());
if (true) {
DLockService service = DLockService.getInternalServiceNamed(svc.getName());
assertNotNull(service);
assertEquals("DTLS and DLock should both report same isDistributed result", true, isDistributed == service.isDistributed());
}
Boolean result = Boolean.valueOf(isDistributed);
logInfo("isDistributed_DTLS (hopefully true): " + result);
return result;
}
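Because this helper is accessed reflectively from other VMs, a typical call site looks like the sketch below (hedged: the VM handle and the assertion are illustrative; the lambda-based vm.invoke(() -> ...) style mirrors the invocations used elsewhere in these tests):

// Illustrative call site in a DUnit controller; Host and VM come from the DUnit
// framework already used by these tests.
VM vm0 = Host.getHost(0).getVM(0);
Boolean remoteIsDistributed = vm0.invoke(() -> TXLockServiceDUnitTest.isDistributed_DTLS());
assertTrue("expected the DTLS in vm0 to be distributed", remoteIsDistributed.booleanValue());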
Use of org.apache.geode.distributed.internal.locks.DLockService in project Geode by Apache.
The class TXDistributedDUnitTest, method testLockBatchParticipantsUpdate.
/**
 * Test update of lock batch participants (needed when new members are discovered between a
 * commit's locking phase and the application of the Region's data). See bug 32999.
 */
@Test
public void testLockBatchParticipantsUpdate() throws Exception {
  final String rgnName = getUniqueName();
  Region rgn = getCache().createRegion(rgnName, getRegionAttributes());
  rgn.create("key", null);
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  VM vm2 = host.getVM(2);
  SerializableRunnable initRegions =
      new SerializableRunnable("testLockBatchParticipantsUpdate: initial configuration") {
        public void run() {
          try {
            Region rgn1 = getCache().createRegion(rgnName, getRegionAttributes());
            rgn1.create("key", null);
          } catch (CacheException e) {
            Assert.fail("While creating region", e);
          }
        }
      };
  vm0.invoke(initRegions);
  vm1.invoke(initRegions);
  rgn.put("key", "val1");
  // Connect vm2 also since it may have been shutdown when logPerTest is turned on.
  vm2.invoke(new SerializableRunnable("connect vm2 if not connected") {
    public void run() {
      getCache();
    }
  });
  // Make VM0 the Grantor.
  vm0.invoke(new SerializableRunnable("testLockBatchParticipantsUpdate: remote grantor init") {
    public void run() {
      try {
        Region rgn1 = getCache().getRegion(rgnName);
        final CacheTransactionManager txMgr2 = getCache().getCacheTransactionManager();
        assertEquals("val1", rgn1.getEntry("key").getValue());
        txMgr2.begin();
        rgn1.put("key", "val2");
        txMgr2.commit();
        assertNotNull(TXLockService.getDTLS());
        assertTrue(TXLockService.getDTLS().isLockGrantor());
      } catch (CacheException e) {
        fail("While performing first transaction");
      }
    }
  });
  // Fix for bug 38843 causes the DTLS to be created in every TX participant.
  assertNotNull(TXLockService.getDTLS());
  assertFalse(TXLockService.getDTLS().isLockGrantor());
  assertEquals("val2", rgn.getEntry("key").getValue());
  // Build sets of system ids and set them up on VM0 for future batch member checks.
  HashSet txMembers = new HashSet(4);
  txMembers.add(getSystemId());
  txMembers.add(vm0.invoke(() -> TXDistributedDUnitTest.getSystemId()));
  vm0.invoke(() -> TXDistributedDUnitTest.setPreTXSystemIds(txMembers));
  txMembers.add(vm2.invoke(() -> TXDistributedDUnitTest.getSystemId()));
  vm0.invoke(() -> TXDistributedDUnitTest.setPostTXSystemIds(txMembers));
  // Don't include the tx host in the batch member set(s).
  Serializable vm1HostId = (Serializable) vm1.invoke(() -> TXDistributedDUnitTest.getSystemId());
  vm0.invoke(() -> TXDistributedDUnitTest.setTXHostSystemId(vm1HostId));
  // Create a TX on VM1 (such that it will ask for locks on VM0) that uses the callbacks
  // to pause and give us time to start a GII process on another VM.
  vm1.invoke(new SerializableRunnable(
      "testLockBatchParticipantsUpdate: slow tx (one that detects new member)") {
    public void run() {
      // Fix for bug 38843 causes the DTLS to be created in every TX participant.
      assertNotNull(TXLockService.getDTLS());
      assertFalse(TXLockService.getDTLS().isLockGrantor());
      PausibleTX pauseTXRunnable = new PausibleTX();
      pauseTXRunnable.rgnName = rgnName;
      pauseTXRunnable.myCache = getCache();
      pauseTXRunnable.key = "key";
      pauseTXRunnable.value = "val3";
      new Thread(pauseTXRunnable, "PausibleTX Thread").start();
      synchronized (PausibleTX.class) {
        while (!pauseTXRunnable.getIsRunning()) {
          try {
            PausibleTX.class.wait();
          } catch (InterruptedException ie) {
            fail("Did not expect " + ie);
          }
        }
      }
    }
  });
  // Verify that the lock batch exists on VM0 and has the size we expect.
  vm0.invoke(new SerializableRunnable(
      "testLockBatchParticipantsUpdate: Verify lock batch exists on VM0 with expected size") {
    public void run() {
      getCache().getRegion(rgnName);
      TXLockServiceImpl dtls = (TXLockServiceImpl) TXLockService.getDTLS();
      assertNotNull(dtls);
      assertTrue(dtls.isLockGrantor());
      DLockService dLockSvc = dtls.getInternalDistributedLockService();
      assertNotNull(TXDistributedDUnitTest.txHostId);
      DLockBatch[] batches = dLockSvc.getGrantor()
          .getLockBatches((InternalDistributedMember) TXDistributedDUnitTest.txHostId);
      assertEquals(1, batches.length);
      TXLockBatch txLockBatch = (TXLockBatch) batches[0];
      assertNotNull(txLockBatch);
      assertNotNull(TXDistributedDUnitTest.preTXSystemIds);
      assertTrue(
          "Members in lock batch " + txLockBatch.getParticipants() + " not the same as "
              + TXDistributedDUnitTest.preTXSystemIds,
          txLockBatch.getParticipants().equals(TXDistributedDUnitTest.preTXSystemIds));
    }
  });
  // Start a GII process on VM2.
  vm2.invoke(new SerializableRunnable("testLockBatchParticipantsUpdate: start GII") {
    public void run() {
      try {
        AttributesFactory factory = new AttributesFactory();
        factory.setScope(Scope.DISTRIBUTED_ACK);
        factory.setEarlyAck(false);
        factory.setDataPolicy(DataPolicy.REPLICATE);
        getCache().createRegion(rgnName, factory.create());
      } catch (CacheException e) {
        Assert.fail("While creating region", e);
      }
    }
  });
  // Notify the TX on VM1 so that it can continue.
  vm1.invoke(new SerializableRunnable("testLockBatchParticipantsUpdate: Notify VM1 TX to continue") {
    public void run() {
      synchronized (PausibleTX.class) {
        // Notify VM1 that it should proceed to the TX send.
        PausibleTX.class.notifyAll();
        // Wait until VM1 has sent the TX.
        try {
          PausibleTX.class.wait();
        } catch (InterruptedException ie) {
          fail("Did not expect " + ie);
        }
      }
    }
  });
  // Verify that the batch on VM0 has added VM2 into the set.
  vm0.invoke(new SerializableRunnable(
      "testLockBatchParticipantsUpdate: Verify lock batch contains VM2") {
    public void run() {
      getCache().getRegion(rgnName);
      TXLockServiceImpl dtls = (TXLockServiceImpl) TXLockService.getDTLS();
      assertNotNull(dtls);
      assertTrue(dtls.isLockGrantor());
      DLockService dLockSvc = dtls.getInternalDistributedLockService();
      assertNotNull(TXDistributedDUnitTest.txHostId);
      DLockBatch[] batches = dLockSvc.getGrantor()
          .getLockBatches((InternalDistributedMember) TXDistributedDUnitTest.txHostId);
      assertEquals(1, batches.length);
      TXLockBatch txLockBatch = (TXLockBatch) batches[0];
      assertNotNull(txLockBatch);
      assertNotNull(TXDistributedDUnitTest.preTXSystemIds);
      assertTrue(
          "Members in lock batch " + txLockBatch.getParticipants() + " not the same as "
              + TXDistributedDUnitTest.postTXSystemIds,
          txLockBatch.getParticipants().equals(TXDistributedDUnitTest.postTXSystemIds));
    }
  });
  // Fix for bug 38843 causes the DTLS to be created in every TX participant.
  assertNotNull(TXLockService.getDTLS());
  assertFalse(TXLockService.getDTLS().isLockGrantor());
  assertEquals("val3", rgn.getEntry("key").getValue());
  // Notify the TX on VM1 that it can go ahead and complete the TX.
  vm1.invoke(new SerializableRunnable("testLockBatchParticipantsUpdate: Notify VM1 TX to finish") {
    public void run() {
      synchronized (PausibleTX.class) {
        // Notify VM1 that it should finish the TX.
        PausibleTX.class.notifyAll();
      }
    }
  });
  rgn.destroyRegion();
}
Use of org.apache.geode.distributed.internal.locks.DLockService in project Geode by Apache.
The class DistributedLockServiceDUnitTest, method becomeLockGrantor.
/**
* Accessed via reflection. DO NOT REMOVE.
*
* @param serviceName
*/
protected static void becomeLockGrantor(String serviceName) {
  DLockService service = (DLockService) DistributedLockService.getServiceNamed(serviceName);
  assertNotNull(service);
  logInfo("About to call becomeLockGrantor...");
  service.becomeLockGrantor();
}
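For context, becomeLockGrantor() belongs to the public DistributedLockService API that the internal DLockService implements. A minimal sketch of that API follows (hedged: the service name, lock name, and the `system` DistributedSystem reference are illustrative assumptions, not taken from the test above):

// Illustrative sketch of the public API; `system` is assumed to be an
// already-connected DistributedSystem.
DistributedLockService svc = DistributedLockService.getServiceNamed("exampleService");
if (svc == null) {
  svc = DistributedLockService.create("exampleService", system);
}
// Ask this member to take over grantor duties, as the helper above does.
svc.becomeLockGrantor();
// Acquire a named lock (wait up to 5 s, hold the lease until unlock), then release it.
if (svc.lock("exampleLock", 5000, -1)) {
  try {
    // critical section
  } finally {
    svc.unlock("exampleLock");
  }
}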