use of org.apache.geode.distributed.internal.membership.InternalDistributedMember in project geode by apache.
the class ExportLogsCommandTest method sizeFromAllMembers_greaterThanLocalDiskAvailable_shouldReturnErrorResult.
@Test
public void sizeFromAllMembers_greaterThanLocalDiskAvailable_shouldReturnErrorResult() throws Exception {
final InternalCache mockCache = mock(InternalCache.class);
final ExportLogsCommand realCmd = new ExportLogsCommand();
ExportLogsCommand spyCmd = spy(realCmd);
String start = null;
String end = null;
String logLevel = null;
boolean onlyLogLevel = false;
boolean logsOnly = false;
boolean statsOnly = false;
InternalDistributedMember member1 = new InternalDistributedMember("member1", 12345);
InternalDistributedMember member2 = new InternalDistributedMember("member2", 98765);
member1.getNetMember().setName("member1");
member2.getNetMember().setName("member2");
Set<DistributedMember> testMembers = new HashSet<>();
testMembers.add(member1);
testMembers.add(member2);
ResultCollector testResults1 = new CustomCollector();
testResults1.addResult(member1, Arrays.asList(75 * MEGABYTE));
ResultCollector testResults2 = new CustomCollector();
testResults2.addResult(member2, Arrays.asList(60 * MEGABYTE));
doReturn(mockCache).when(spyCmd).getCache();
doReturn(testMembers).when(spyCmd).getMembers(null, null);
doReturn(testResults1).when(spyCmd).estimateLogSize(Matchers.any(SizeExportLogsFunction.Args.class), eq(member1));
doReturn(testResults2).when(spyCmd).estimateLogSize(Matchers.any(SizeExportLogsFunction.Args.class), eq(member2));
doReturn(125 * MEGABYTE).when(spyCmd).getLocalDiskAvailable();
doReturn(GIGABYTE).when(spyCmd).getLocalDiskSize();
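// With these stubs the estimated log size is 75 MB + 60 MB = 135 MB, which exceeds
// the 125 MB of locally available disk, so exportLogs is expected to return ERROR.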
Result res = spyCmd.exportLogs("working dir", null, null, logLevel, onlyLogLevel, false, start, end, logsOnly, statsOnly, "125m");
assertThat(res.getStatus()).isEqualTo(Result.Status.ERROR);
}
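The CustomCollector used for the stubbed estimateLogSize results is defined elsewhere in the test class and not shown in this snippet. A minimal sketch of such a ResultCollector, assuming it only needs to accumulate results in memory:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.geode.cache.execute.FunctionException;
import org.apache.geode.cache.execute.ResultCollector;
import org.apache.geode.distributed.DistributedMember;

// Hypothetical stand-in for the CustomCollector referenced above.
public class CustomCollector implements ResultCollector<Object, List<Object>> {
  private final List<Object> results = new ArrayList<>();

  @Override
  public List<Object> getResult() throws FunctionException {
    return results;
  }

  @Override
  public List<Object> getResult(long timeout, TimeUnit unit) throws FunctionException {
    // this sketch never blocks, so the timeout is ignored
    return results;
  }

  @Override
  public void addResult(DistributedMember memberID, Object result) {
    results.add(result);
  }

  @Override
  public void endResults() {
    // nothing to finalize in this sketch
  }

  @Override
  public void clearResults() {
    results.clear();
  }
}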
use of org.apache.geode.distributed.internal.membership.InternalDistributedMember in project geode by apache.
the class DistributedAckRegionCCEDUnitTest method testConcurrentOpWithGII.
/**
 * Test for bug #45564: a create() is received by the region creator and then a later destroy()
 * is received in the initial image; the bug was that, while the version info from the destroy
 * was recorded, the value from the create event was kept.
 */
@Test
public void testConcurrentOpWithGII() {
if (this.getClass() != DistributedAckRegionCCEDUnitTest.class) {
// not really a scope-related thing
return;
}
final String name = this.getUniqueName() + "-CC";
final String key = "mykey";
VM vm1 = Host.getHost(0).getVM(1);
VM vm2 = Host.getHost(0).getVM(2);
// create some destroyed entries so the GC service is populated
SerializableCallable create = new SerializableCallable("create region") {
public Object call() {
RegionFactory f = getCache().createRegionFactory(getRegionAttributes());
CCRegion = (LocalRegion) f.create(name);
return CCRegion.getDistributionManager().getDistributionManagerId();
}
};
// do conflicting update() and destroy() on the region. We want the update() to
// be sent with a message and the destroy() to be transferred in the initial image
// and be the value that we want to keep
InternalDistributedMember vm1ID = (InternalDistributedMember) vm1.invoke(create);
AsyncInvocation partialCreate = vm2.invokeAsync(new SerializableCallable("create region with stall") {
public Object call() throws Exception {
final GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
RegionFactory f = cache.createRegionFactory(getRegionAttributes());
InitialImageOperation.VMOTION_DURING_GII = true;
// this will stall region creation at the point of asking for an initial image
VMotionObserverHolder.setInstance(new VMotionObserver() {
@Override
public void vMotionBeforeCQRegistration() {
}
@Override
public void vMotionBeforeRegisterInterest() {
}
@Override
public void vMotionDuringGII(Set recipientSet, LocalRegion region) {
InitialImageOperation.VMOTION_DURING_GII = false;
int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.BEFORE_INITIAL_IMAGE);
LocalRegion ccregion = cache.getRegionByPath("/" + name);
try {
// wait for the update op (sent below from vm1) to arrive, then allow the GII to happen
while (!ccregion.isDestroyed() && ccregion.getRegionEntry(key) == null) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
return;
}
}
} finally {
LocalRegion.setThreadInitLevelRequirement(oldLevel);
}
}
});
try {
CCRegion = (LocalRegion) f.create(name);
// at this point we should have received the update op and then the GII, which should
// overwrite the conflicting update op
assertFalse("expected initial image transfer to destroy entry", CCRegion.containsKey(key));
} finally {
InitialImageOperation.VMOTION_DURING_GII = false;
}
return null;
}
});
vm1.invoke(new SerializableRunnable("create conflicting events") {
public void run() {
// wait for the other to come on line
long waitEnd = System.currentTimeMillis() + 45000;
DistributionAdvisor adv = ((DistributedRegion) CCRegion).getCacheDistributionAdvisor();
while (System.currentTimeMillis() < waitEnd && adv.adviseGeneric().isEmpty()) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
return;
}
}
if (adv.adviseGeneric().isEmpty()) {
fail("other member never came on line");
}
// inhibit all messaging
DistributedCacheOperation.LOSS_SIMULATION_RATIO = 200.0;
try {
CCRegion.put("mykey", "initialValue");
CCRegion.destroy("mykey");
} finally {
DistributedCacheOperation.LOSS_SIMULATION_RATIO = 0.0;
}
// generate a fake version tag for the message
VersionTag tag = CCRegion.getRegionEntry(key).getVersionStamp().asVersionTag();
// create a fake member ID that will be < mine and lose a concurrency check
NetMember nm = CCRegion.getDistributionManager().getDistributionManagerId().getNetMember();
InternalDistributedMember mbr = null;
try {
mbr = new InternalDistributedMember(nm.getInetAddress().getCanonicalHostName(), nm.getPort() - 1, "fake_id", "fake_id_ustring", DistributionManager.NORMAL_DM_TYPE, null, null);
tag.setMemberID(mbr);
} catch (UnknownHostException e) {
org.apache.geode.test.dunit.Assert.fail("could not create member id", e);
}
// generate an event to distribute that contains the fake version tag
EntryEventImpl event = EntryEventImpl.create(CCRegion, Operation.UPDATE, key, false, mbr, true, false);
event.setNewValue("newValue");
event.setVersionTag(tag);
// this should update the controller's cache with the updated value but leave this cache
// alone
DistributedCacheOperation op = new UpdateOperation(event, tag.getVersionTimeStamp());
op.distribute();
event.release();
}
});
try {
partialCreate.getResult();
} catch (Throwable e) {
org.apache.geode.test.dunit.Assert.fail("async invocation in vm2 failed", e);
}
}
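The fake-ID trick above works because concurrency checks break version-tag ties by comparing member IDs, and the lower ID loses (as the test's own comment notes). A hedged illustration of that ordering, with made-up host and ports:

import org.apache.geode.distributed.internal.membership.InternalDistributedMember;

public class MemberOrderingSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: with equal hosts, the ID built from the lower port sorts lower,
    // so an update tagged with it loses a version-tag tie to the real member.
    InternalDistributedMember real = new InternalDistributedMember("localhost", 10001);
    InternalDistributedMember fake = new InternalDistributedMember("localhost", 10000);
    System.out.println(fake.compareTo(real) < 0); // expected: true
  }
}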
use of org.apache.geode.distributed.internal.membership.InternalDistributedMember in project geode by apache.
the class TXDistributedDUnitTest method testLockBatchParticipantsUpdate.
/**
 * Test update of lock batch participants (needed when new members are discovered between a
 * commit's locking phase and the application of the Region's data). See bug 32999.
 */
@Test
public void testLockBatchParticipantsUpdate() throws Exception {
final String rgnName = getUniqueName();
Region rgn = getCache().createRegion(rgnName, getRegionAttributes());
rgn.create("key", null);
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
SerializableRunnable initRegions = new SerializableRunnable("testLockBatchParticipantsUpdate: initial configuration") {
public void run() {
try {
Region rgn1 = getCache().createRegion(rgnName, getRegionAttributes());
rgn1.create("key", null);
} catch (CacheException e) {
Assert.fail("While creating region", e);
}
}
};
vm0.invoke(initRegions);
vm1.invoke(initRegions);
rgn.put("key", "val1");
// Connect vm2 also since it may have been shutdown when logPerTest
// is turned on
vm2.invoke(new SerializableRunnable("connect vm2 if not connected") {
public void run() {
getCache();
}
});
// Make VM0 the Grantor
vm0.invoke(new SerializableRunnable("testLockBatchParticipantsUpdate: remote grantor init") {
public void run() {
try {
Region rgn1 = getCache().getRegion(rgnName);
final CacheTransactionManager txMgr2 = getCache().getCacheTransactionManager();
assertEquals("val1", rgn1.getEntry("key").getValue());
txMgr2.begin();
rgn1.put("key", "val2");
txMgr2.commit();
assertNotNull(TXLockService.getDTLS());
assertTrue(TXLockService.getDTLS().isLockGrantor());
} catch (CacheException e) {
fail("While performing first transaction");
}
}
});
// fix for bug 38843 causes the DTLS to be created in every TX participant
assertNotNull(TXLockService.getDTLS());
assertFalse(TXLockService.getDTLS().isLockGrantor());
assertEquals("val2", rgn.getEntry("key").getValue());
// Build sets of System Ids and set them up on VM0 for future batch member checks
HashSet txMembers = new HashSet(4);
txMembers.add(getSystemId());
txMembers.add(vm0.invoke(() -> TXDistributedDUnitTest.getSystemId()));
vm0.invoke(() -> TXDistributedDUnitTest.setPreTXSystemIds(txMembers));
txMembers.add(vm2.invoke(() -> TXDistributedDUnitTest.getSystemId()));
vm0.invoke(() -> TXDistributedDUnitTest.setPostTXSystemIds(txMembers));
// Don't include the tx host in the batch member set(s)
Serializable vm1HostId = (Serializable) vm1.invoke(() -> TXDistributedDUnitTest.getSystemId());
vm0.invoke(() -> TXDistributedDUnitTest.setTXHostSystemId(vm1HostId));
// Create a TX on VM1 (such that it will ask for locks on VM0) that uses the callbacks
// to pause and give us time to start a GII process on another VM
vm1.invoke(new SerializableRunnable("testLockBatchParticipantsUpdate: slow tx (one that detects new member)") {
public void run() {
// fix for bug 38843 causes the DTLS to be created in every TX participant
assertNotNull(TXLockService.getDTLS());
assertFalse(TXLockService.getDTLS().isLockGrantor());
PausibleTX pauseTXRunnable = new PausibleTX();
pauseTXRunnable.rgnName = rgnName;
pauseTXRunnable.myCache = getCache();
pauseTXRunnable.key = "key";
pauseTXRunnable.value = "val3";
new Thread(pauseTXRunnable, "PausibleTX Thread").start();
synchronized (PausibleTX.class) {
while (!pauseTXRunnable.getIsRunning()) {
try {
PausibleTX.class.wait();
} catch (InterruptedException ie) {
fail("Did not expect " + ie);
}
}
}
}
});
// Verify that the lock batch exists on VM0 and has the size we expect
vm0.invoke(new SerializableRunnable("testLockBatchParticipantsUpdate: Verify lock batch exists on VM0 with expected size") {
public void run() {
getCache().getRegion(rgnName);
TXLockServiceImpl dtls = (TXLockServiceImpl) TXLockService.getDTLS();
assertNotNull(dtls);
assertTrue(dtls.isLockGrantor());
DLockService dLockSvc = dtls.getInternalDistributedLockService();
assertNotNull(TXDistributedDUnitTest.txHostId);
DLockBatch[] batches = dLockSvc.getGrantor().getLockBatches((InternalDistributedMember) TXDistributedDUnitTest.txHostId);
assertEquals(1, batches.length);
TXLockBatch txLockBatch = (TXLockBatch) batches[0];
assertNotNull(txLockBatch);
assertNotNull(TXDistributedDUnitTest.preTXSystemIds);
assertTrue("Members in lock batch " + txLockBatch.getParticipants() + " not the same as " + TXDistributedDUnitTest.preTXSystemIds, txLockBatch.getParticipants().equals(TXDistributedDUnitTest.preTXSystemIds));
}
});
// Start a GII process on VM2
vm2.invoke(new SerializableRunnable("testLockBatchParticipantsUpdate: start GII") {
public void run() {
try {
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setEarlyAck(false);
factory.setDataPolicy(DataPolicy.REPLICATE);
getCache().createRegion(rgnName, factory.create());
} catch (CacheException e) {
Assert.fail("While creating region", e);
}
}
});
// Notify TX on VM1 so that it can continue
vm1.invoke(new SerializableRunnable("testLockBatchParticipantsUpdate: Notify VM1 TX to continue") {
public void run() {
synchronized (PausibleTX.class) {
// Notify VM1 that it should proceed to the TX send
PausibleTX.class.notifyAll();
// Wait until VM1 has sent the TX
try {
PausibleTX.class.wait();
} catch (InterruptedException ie) {
fail("Did not expect " + ie);
}
}
}
});
// Verify that the batch on VM0 has added VM2 into the set
vm0.invoke(new SerializableRunnable("testLockBatchParticipantsUpdate: Verify lock batch contains VM2") {
public void run() {
getCache().getRegion(rgnName);
TXLockServiceImpl dtls = (TXLockServiceImpl) TXLockService.getDTLS();
assertNotNull(dtls);
assertTrue(dtls.isLockGrantor());
DLockService dLockSvc = dtls.getInternalDistributedLockService();
assertNotNull(TXDistributedDUnitTest.txHostId);
DLockBatch[] batches = dLockSvc.getGrantor().getLockBatches((InternalDistributedMember) TXDistributedDUnitTest.txHostId);
assertEquals(1, batches.length);
TXLockBatch txLockBatch = (TXLockBatch) batches[0];
assertNotNull(txLockBatch);
assertNotNull(TXDistributedDUnitTest.postTXSystemIds);
assertTrue("Members in lock batch " + txLockBatch.getParticipants() + " not the same as " + TXDistributedDUnitTest.postTXSystemIds, txLockBatch.getParticipants().equals(TXDistributedDUnitTest.postTXSystemIds));
}
});
// fix for bug 38843 causes the DTLS to be created in every TX participant
assertNotNull(TXLockService.getDTLS());
assertFalse(TXLockService.getDTLS().isLockGrantor());
assertEquals("val3", rgn.getEntry("key").getValue());
// Notify TX on VM1 that it can go ahead and complete the TX
vm1.invoke(new SerializableRunnable("testLockBatchParticipantsUpdate: Notify VM1 TX to finish") {
public void run() {
synchronized (PausibleTX.class) {
// Notify VM1 that it should finish the TX
PausibleTX.class.notifyAll();
}
}
});
rgn.destroyRegion();
}
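The PausibleTX helper that the snippet hands off to is also defined elsewhere in the test class. The real helper pauses the transaction between its locking phase and its commit via internal test hooks; the hedged sketch below reproduces only the PausibleTX.class monitor handshake that the invocations above rely on (here the commit completes normally, which the real helper does not do):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.Region;

// Hypothetical stand-in for PausibleTX (the field and method names mirror the
// usages above; everything else is an assumption).
public class PausibleTX implements Runnable {
  String rgnName;
  Cache myCache;
  Object key;
  Object value;
  private volatile boolean isRunning = false;

  public boolean getIsRunning() {
    return isRunning;
  }

  @Override
  public void run() {
    try {
      Region region = myCache.getRegion(rgnName);
      CacheTransactionManager txMgr = myCache.getCacheTransactionManager();
      synchronized (PausibleTX.class) {
        isRunning = true;
        PausibleTX.class.notifyAll(); // step 1: tell the invoker we are running
        PausibleTX.class.wait();      // step 2: wait for "proceed to the TX send"
        txMgr.begin();
        region.put(key, value);
        txMgr.commit();               // the real helper stalls inside commit, not after it
        PausibleTX.class.notifyAll(); // step 3: tell the invoker the TX was sent
        PausibleTX.class.wait();      // step 4: wait for permission to finish
      }
    } catch (Exception e) {
      throw new AssertionError("PausibleTX failed", e);
    }
  }
}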
use of org.apache.geode.distributed.internal.membership.InternalDistributedMember in project geode by apache.
the class DistributedLockServiceDUnitTest method testBasicGrantorRecovery.
@Test
public void testBasicGrantorRecovery() {
// 1) start up 4 VM members...
int numVMs = 4;
final String serviceName = "testBasicGrantorRecovery_" + getUniqueName();
distributedCreateService(numVMs, serviceName);
try {
Thread.sleep(100);
} catch (InterruptedException ignore) {
fail("interrupted");
}
final Object[] args = new Object[] { serviceName };
final Host host = Host.getHost(0);
int originalGrantor = 3;
host.getVM(originalGrantor).invoke(DistributedLockServiceDUnitTest.class, "identifyLockGrantor", args);
// 2) find the grantor and disconnect him...
int originalVM = -1;
InternalDistributedMember oldGrantor = null;
for (int vm = 0; vm < numVMs; vm++) {
final int finalvm = vm;
Boolean isGrantor = (Boolean) host.getVM(finalvm).invoke(DistributedLockServiceDUnitTest.class, "isLockGrantor", args);
if (isGrantor.booleanValue()) {
originalVM = vm;
oldGrantor = (InternalDistributedMember) host.getVM(finalvm).invoke(DistributedLockServiceDUnitTest.class, "identifyLockGrantor", args);
break;
}
}
assertTrue(originalVM == originalGrantor);
host.getVM(originalVM).invoke(new SerializableRunnable() {
public void run() {
disconnectFromDS();
}
});
try {
Thread.sleep(100);
} catch (InterruptedException ignore) {
fail("interrupted");
}
// 3) verify that another member recovers for grantor
int attempts = 3;
for (int attempt = 0; attempt < attempts; attempt++) {
try {
for (int vm = 0; vm < numVMs; vm++) {
if (vm == originalVM)
continue; // skip because he's disconnected
final int finalvm = vm;
logInfo("[testBasicGrantorRecovery] VM " + finalvm + " in " + serviceName + " about to invoke");
InternalDistributedMember id = (InternalDistributedMember) host.getVM(finalvm).invoke(DistributedLockServiceDUnitTest.class, "identifyLockGrantor", args);
logInfo("[testBasicGrantorRecovery] VM " + finalvm + " in " + serviceName + " got " + id);
assertGrantorIsConsistent(id);
logInfo("[testBasicGrantorRecovery] new grantor " + id + " is not old grantor " + oldGrantor);
// new grantor != old grantor
assertTrue("New grantor must not equal the old grantor", !id.equals(oldGrantor));
} // loop thru vms
logInfo("[testBasicGrantorRecovery] succeeded attempt " + attempt);
break; // success
} catch (AssertionError e) {
logInfo("[testBasicGrantorRecovery] failed attempt " + attempt);
if (attempt == attempts - 1)
throw e;
}
} // loop thru attempts
}
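The identifyLockGrantor and isLockGrantor helpers that the VMs invoke by name are defined elsewhere in the test class. Hedged sketches, assuming they are thin wrappers over the lock service (getLockGrantorId and getLockGrantorMember are assumptions about the internal API):

import org.apache.geode.distributed.DistributedLockService;
import org.apache.geode.distributed.internal.locks.DLockService;
import org.apache.geode.distributed.internal.membership.InternalDistributedMember;

public class GrantorHelpersSketch {

  // Hypothetical: returns the member currently acting as grantor for the service.
  public static InternalDistributedMember identifyLockGrantor(String serviceName) {
    DLockService service = (DLockService) DistributedLockService.getServiceNamed(serviceName);
    return service.getLockGrantorId().getLockGrantorMember(); // assumed accessors
  }

  // Hypothetical: true if this VM is the grantor for the service.
  public static Boolean isLockGrantor(String serviceName) {
    DLockService service = (DLockService) DistributedLockService.getServiceNamed(serviceName);
    return Boolean.valueOf(service.isLockGrantor());
  }
}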
use of org.apache.geode.distributed.internal.membership.InternalDistributedMember in project geode by apache.
the class DistributedLockServiceDUnitTest method testGrantorSelection.
@Test
public void testGrantorSelection() {
// TODO change distributedCreateService usage to be concurrent threads
// bring up 4 members and make sure all identify one as grantor
int numVMs = 4;
final String serviceName = "testGrantorSelection_" + getUniqueName();
distributedCreateService(numVMs, serviceName);
try {
Thread.sleep(100);
} catch (InterruptedException ignore) {
fail("interrupted");
}
final Object[] args = new Object[] { serviceName };
final Host host = Host.getHost(0);
for (int vm = 0; vm < numVMs; vm++) {
final int finalvm = vm;
logInfo("VM " + finalvm + " in " + serviceName + " about to invoke");
InternalDistributedMember id = (InternalDistributedMember) host.getVM(finalvm).invoke(DistributedLockServiceDUnitTest.class, "identifyLockGrantor", args);
logInfo("VM " + finalvm + " in " + serviceName + " got " + id);
assertGrantorIsConsistent(id);
}
}
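distributedCreateService is likewise defined elsewhere in the test class. A hedged sketch, assuming it simply creates the named service in each VM through the public DistributedLockService.create API:

import org.apache.geode.distributed.DistributedLockService;
import org.apache.geode.distributed.internal.InternalDistributedSystem;
import org.apache.geode.test.dunit.Host;
import org.apache.geode.test.dunit.SerializableRunnable;

public class CreateServiceSketch {

  // Hypothetical helper: create the named lock service in each of numVMs VMs.
  // The upstream helper may also create the service in the controller VM and
  // use different plumbing to obtain the DistributedSystem.
  static void distributedCreateService(int numVMs, final String serviceName) {
    final Host host = Host.getHost(0);
    for (int i = 0; i < numVMs; i++) {
      host.getVM(i).invoke(new SerializableRunnable("create " + serviceName) {
        public void run() {
          DistributedLockService.create(serviceName, InternalDistributedSystem.getAnyInstance());
        }
      });
    }
  }
}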