Use of org.apache.geode.distributed.internal.InternalDistributedSystem in project geode by apache: the class PersistentColocatedPartitionedRegionDUnitTest, method testCrashDuringRedundancySatisfaction.
/**
* Test what happens when we crash in the middle of satisfying redundancy for a colocated bucket.
*
* @throws Throwable
*/
// This test method is disabled because it is failing
// periodically and causing cruise control failures
// See bug #46748
@Test
public void testCrashDuringRedundancySatisfaction() throws Throwable {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
SerializableRunnable createPRs = new SerializableRunnable("region1") {
public void run() {
Cache cache = getCache();
DiskStore ds = cache.findDiskStore("disk");
if (ds == null) {
ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
}
AttributesFactory af = new AttributesFactory();
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(1);
// Workaround for 44414 - disable recovery delay so we shut down
// vm1 at a predictable point.
paf.setRecoveryDelay(-1);
paf.setStartupRecoveryDelay(-1);
af.setPartitionAttributes(paf.create());
af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
af.setDiskStoreName("disk");
cache.createRegion(PR_REGION_NAME, af.create());
paf.setColocatedWith(PR_REGION_NAME);
af.setPartitionAttributes(paf.create());
cache.createRegion("region2", af.create());
}
};
// Create the PR on vm0
vm0.invoke(createPRs);
// Create some buckets.
createData(vm0, 0, NUM_BUCKETS, "a");
createData(vm0, 0, NUM_BUCKETS, "a", "region2");
vm1.invoke(createPRs);
// We shouldn't have created any buckets in vm1 yet.
assertEquals(Collections.emptySet(), getBucketList(vm1));
// Add an observer that will disconnect before allowing the peer to
// GII a colocated bucket. This should leave the peer with only the parent
// bucket
vm0.invoke(new SerializableRunnable() {
public void run() {
DistributionMessageObserver.setInstance(new DistributionMessageObserver() {
@Override
public void beforeProcessMessage(DistributionManager dm, DistributionMessage message) {
if (message instanceof RequestImageMessage) {
if (((RequestImageMessage) message).regionPath.contains("region2")) {
DistributionMessageObserver.setInstance(null);
disconnectFromDS();
}
}
}
});
}
});
IgnoredException ex = IgnoredException.addIgnoredException("PartitionOfflineException", vm1);
try {
// Do a rebalance to create buckets in vm1. This will cause vm0 to disconnect
// as we satisfy redundancy with vm1.
try {
RebalanceResults rr = rebalance(vm1);
} catch (Exception expected) {
// We expect a PartitionOfflineException here, caused by the forced disconnect
if (!(expected.getCause() instanceof PartitionOfflineException)) {
throw expected;
}
}
// Wait for vm0 to be closed by the callback
vm0.invoke(new SerializableCallable() {
public Object call() throws Exception {
Wait.waitForCriterion(new WaitCriterion() {
public boolean done() {
InternalDistributedSystem ds = basicGetSystem();
return ds == null || !ds.isConnected();
}
public String description() {
return "DS did not disconnect";
}
}, MAX_WAIT, 100, true);
return null;
}
});
// close the cache in vm1
SerializableCallable disconnectFromDS = new SerializableCallable() {
public Object call() throws Exception {
disconnectFromDS();
return null;
}
};
vm1.invoke(disconnectFromDS);
// Make sure vm0 is disconnected. This avoids a race where we
// may still be in the process of disconnecting even though our async listener
// found the system was disconnected.
vm0.invoke(disconnectFromDS);
} finally {
ex.remove();
}
// Create the cache and PRs on both members
AsyncInvocation async0 = vm0.invokeAsync(createPRs);
AsyncInvocation async1 = vm1.invokeAsync(createPRs);
async0.getResult(MAX_WAIT);
async1.getResult(MAX_WAIT);
// Make sure the data was recovered correctly
checkData(vm0, 0, NUM_BUCKETS, "a");
// Workaround for bug 46748.
checkData(vm0, 0, NUM_BUCKETS, "a", "region2");
}
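The rebalance(vm) helper invoked above is not included in this snippet. In Geode DUnit tests it usually just wraps the public ResourceManager rebalance API in a SerializableCallable; the following is a minimal sketch under that assumption (the helper name and exact structure are illustrative, not the test's actual code), using org.apache.geode.cache.control.RebalanceOperation and RebalanceResults:

private RebalanceResults rebalance(VM vm) {
  return (RebalanceResults) vm.invoke(new SerializableCallable("rebalance") {
    public Object call() throws Exception {
      // start a rebalance through the public ResourceManager API and block until it finishes
      RebalanceOperation op = getCache().getResourceManager().createRebalanceFactory().start();
      return op.getResults();
    }
  });
}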
Use of org.apache.geode.distributed.internal.InternalDistributedSystem in project geode by apache: the class OutOfOffHeapMemoryDUnitTest, method testOtherMembersSeeOutOfOffHeapMemoryMemberDisconnects.
@Test
public void testOtherMembersSeeOutOfOffHeapMemoryMemberDisconnects() {
final int vmCount = Host.getHost(0).getVMCount();
final String name = getRegionName();
final RegionShortcut shortcut = getRegionShortcut();
final int biggerVM = 0;
final int smallerVM = 1;
Host.getHost(0).getVM(smallerVM).invoke(new SerializableRunnable() {
public void run() {
OutOfOffHeapMemoryDUnitTest.isSmallerVM.set(true);
}
});
// create off-heap region in all members
for (int i = 0; i < vmCount; i++) {
Host.getHost(0).getVM(i).invoke(new SerializableRunnable() {
public void run() {
OutOfOffHeapMemoryDUnitTest.cache.set(getCache());
OutOfOffHeapMemoryDUnitTest.system.set(getSystem());
final Region<Object, Object> region = OutOfOffHeapMemoryDUnitTest.cache.get().createRegionFactory(shortcut).setOffHeap(true).create(name);
assertNotNull(region);
}
});
}
// make sure there are vmCount+1 members total
for (int i = 0; i < vmCount; i++) {
Host.getHost(0).getVM(i).invoke(new SerializableRunnable() {
public void run() {
assertFalse(OutOfOffHeapMemoryDUnitTest.cache.get().isClosed());
assertTrue(OutOfOffHeapMemoryDUnitTest.system.get().isConnected());
// +1 for locator
final int countMembersPlusLocator = vmCount + 1;
// -1 for self
final int countOtherMembers = vmCount - 1;
assertEquals(countMembersPlusLocator, ((InternalDistributedSystem) OutOfOffHeapMemoryDUnitTest.system.get()).getDistributionManager().getDistributionManagerIds().size());
assertEquals(countOtherMembers, ((DistributedRegion) OutOfOffHeapMemoryDUnitTest.cache.get().getRegion(name)).getDistributionAdvisor().getNumProfiles());
}
});
}
// perform puts in bigger member until smaller member goes OOOHME
Host.getHost(0).getVM(biggerVM).invoke(new SerializableRunnable() {
public void run() {
final long TIME_LIMIT = 30 * 1000;
final StopWatch stopWatch = new StopWatch(true);
// -1 for self
int countOtherMembers = vmCount - 1;
// -1 for self, -1 for smallerVM
final int countOtherMembersMinusSmaller = vmCount - 1 - 1;
final Region<Object, Object> region = OutOfOffHeapMemoryDUnitTest.cache.get().getRegion(name);
for (int i = 0; countOtherMembers > countOtherMembersMinusSmaller; i++) {
region.put("key-" + i, new byte[1024]);
countOtherMembers = ((DistributedRegion) OutOfOffHeapMemoryDUnitTest.cache.get().getRegion(name)).getDistributionAdvisor().getNumProfiles();
assertTrue("puts failed to push member out of off-heap memory within time limit", stopWatch.elapsedTimeMillis() < TIME_LIMIT);
}
assertEquals("Member did not depart from OutOfOffHeapMemory", countOtherMembersMinusSmaller, countOtherMembers);
}
});
// verify that member with OOOHME closed
Host.getHost(0).getVM(smallerVM).invoke(new SerializableRunnable() {
public void run() {
assertTrue(OutOfOffHeapMemoryDUnitTest.cache.get().isClosed());
assertFalse(OutOfOffHeapMemoryDUnitTest.system.get().isConnected());
}
});
// verify that all other members noticed smaller member closed
for (int i = 0; i < vmCount; i++) {
if (i == smallerVM) {
continue;
}
Host.getHost(0).getVM(i).invoke(new SerializableRunnable() {
public void run() {
// +1 for locator, -1 for OOOHME member
final int countMembersPlusLocator = vmCount + 1 - 1;
// -1 for self, -1 for OOOHME member
final int countOtherMembers = vmCount - 1 - 1;
with().pollInterval(10, TimeUnit.MILLISECONDS).await().atMost(30, TimeUnit.SECONDS).until(numDistributionManagers(), equalTo(countMembersPlusLocator));
with().pollInterval(10, TimeUnit.MILLISECONDS).await().atMost(30, TimeUnit.SECONDS).until(numProfiles(), equalTo(countOtherMembers));
}
private Callable<Integer> numProfiles() {
return () -> {
DistributedRegion dr = (DistributedRegion) OutOfOffHeapMemoryDUnitTest.cache.get().getRegion(name);
return dr.getDistributionAdvisor().getNumProfiles();
};
}
private Callable<Integer> numDistributionManagers() {
return () -> {
InternalDistributedSystem ids = (InternalDistributedSystem) OutOfOffHeapMemoryDUnitTest.system.get();
return ids.getDistributionManager().getDistributionManagerIds().size();
};
}
});
}
}
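The off-heap limits themselves are configured outside this method, presumably via a distributed-system-properties override that consults the isSmallerVM flag set above; that code is not shown here. A minimal sketch of the kind of per-member configuration the test relies on, with the 2m/4m sizes and the REPLICATE shortcut chosen purely for illustration:

Properties props = new Properties();
props.setProperty(ConfigurationProperties.MCAST_PORT, "0");
// give the "smaller" VM a lower off-heap limit so it is the first member to run out
props.setProperty(ConfigurationProperties.OFF_HEAP_MEMORY_SIZE, isSmallerVM.get() ? "2m" : "4m");
Cache cache = new CacheFactory(props).create();
Region<Object, Object> region =
    cache.createRegionFactory(RegionShortcut.REPLICATE).setOffHeap(true).create("region");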
Use of org.apache.geode.distributed.internal.InternalDistributedSystem in project geode by apache: the class AuthJUnitTest, method testAuthConfig.
@Test
public void testAuthConfig() {
setupCacheWithPassword();
InternalDistributedSystem iD = (InternalDistributedSystem) cache.getDistributedSystem();
assert (iD.getConfig().getRedisPassword().equals(PASSWORD));
}
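setupCacheWithPassword() is a helper defined elsewhere in AuthJUnitTest. Judging from the assertion above, it sets the redis-password distribution property (exposed through DistributionConfig.getRedisPassword()) before creating the cache. A rough sketch under that assumption, with the port handling and field names purely illustrative:

private void setupCacheWithPassword() {
  CacheFactory cf = new CacheFactory();
  cf.set(ConfigurationProperties.MCAST_PORT, "0");
  cf.set(ConfigurationProperties.LOCATORS, "");
  // hypothetical: start the embedded Redis adapter on an available port, protected by PASSWORD
  cf.set(ConfigurationProperties.REDIS_PORT, Integer.toString(port));
  cf.set(ConfigurationProperties.REDIS_PASSWORD, PASSWORD);
  cache = cf.create();
}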
Use of org.apache.geode.distributed.internal.InternalDistributedSystem in project geode by apache: the class PeerAuthenticatorWithCachelessLocatorDUnitTest, method testPeerAuthenticator.
@Test
public void testPeerAuthenticator() throws Exception {
int locatorPort = AvailablePortHelper.getRandomAvailableTCPPort();
locator.invoke(() -> {
Properties props = new Properties();
props.setProperty(SECURITY_PEER_AUTHENTICATOR, DummyAuthenticator.class.getName());
props.setProperty(MCAST_PORT, "0");
props.put(JMX_MANAGER, "true");
props.put(JMX_MANAGER_START, "true");
props.put(JMX_MANAGER_PORT, "0");
props.setProperty("start-locator", "localhost[" + locatorPort + "]");
DistributedSystem.connect(props);
});
// set up server with security
String locators = "localhost[" + locatorPort + "]";
server.invoke(() -> {
Properties props = new Properties();
props.setProperty(MCAST_PORT, "0");
props.setProperty(LOCATORS, locators);
// the following are needed for peer-to-peer authentication
props.setProperty("security-username", "user");
props.setProperty("security-password", "user");
// this should execute without exception
InternalDistributedSystem ds = getSystem(props);
});
server1.invoke(() -> {
Properties props = new Properties();
props.setProperty(MCAST_PORT, "0");
props.setProperty(LOCATORS, locators);
// the following are needed for peer-to-peer authentication
props.setProperty("security-username", "bogus");
props.setProperty("security-password", "user");
assertThatThrownBy(() -> getSystem(props)).isInstanceOf(GemFireSecurityException.class);
});
}
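DummyAuthenticator, referenced through SECURITY_PEER_AUTHENTICATOR above, implements org.apache.geode.security.Authenticator. The real class lives in the Geode test tree and may differ in detail; the sketch below only illustrates the contract this test exercises, accepting a member whose security-username matches its security-password (UsernamePrincipal is a hypothetical Principal implementation):

public class DummyAuthenticator implements Authenticator {

  // a static factory of this form is the conventional way Geode security callbacks are exposed
  public static Authenticator create() {
    return new DummyAuthenticator();
  }

  public void init(Properties securityProps, LogWriter systemLogger, LogWriter securityLogger) {}

  public Principal authenticate(Properties credentials, DistributedMember member)
      throws AuthenticationFailedException {
    String user = credentials.getProperty("security-username");
    String password = credentials.getProperty("security-password");
    if (user != null && user.equals(password)) {
      return new UsernamePrincipal(user);
    }
    throw new AuthenticationFailedException("Invalid credentials for user " + user);
  }

  public void close() {}
}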
Use of org.apache.geode.distributed.internal.InternalDistributedSystem in project geode by apache: the class Fakes, method cache.
/**
* A fake cache, which contains a fake distributed system, distribution manager, etc.
*/
public static GemFireCacheImpl cache() {
GemFireCacheImpl cache = mock(GemFireCacheImpl.class);
InternalDistributedSystem system = mock(InternalDistributedSystem.class);
DistributionConfig config = mock(DistributionConfig.class);
DistributionManager distributionManager = mock(DistributionManager.class);
CancelCriterion systemCancelCriterion = mock(CancelCriterion.class);
DSClock clock = mock(DSClock.class);
LogWriter logger = mock(LogWriter.class);
Statistics stats = mock(Statistics.class);
InternalDistributedMember member;
member = new InternalDistributedMember("localhost", 5555);
when(config.getCacheXmlFile()).thenReturn(new File(""));
when(config.getDeployWorkingDir()).thenReturn(new File("."));
when(cache.getDistributedSystem()).thenReturn(system);
when(cache.getInternalDistributedSystem()).thenReturn(system);
when(cache.getSystem()).thenReturn(system);
when(cache.getMyId()).thenReturn(member);
when(cache.getDistributionManager()).thenReturn(distributionManager);
when(cache.getCancelCriterion()).thenReturn(systemCancelCriterion);
when(cache.getCachePerfStats()).thenReturn(mock(CachePerfStats.class));
when(system.getDistributedMember()).thenReturn(member);
when(system.getConfig()).thenReturn(config);
when(system.getDistributionManager()).thenReturn(distributionManager);
when(system.getCancelCriterion()).thenReturn(systemCancelCriterion);
when(system.getClock()).thenReturn(clock);
when(system.getLogWriter()).thenReturn(logger);
when(system.createAtomicStatistics(any(), any(), anyLong())).thenReturn(stats);
when(system.createAtomicStatistics(any(), any())).thenReturn(stats);
when(distributionManager.getId()).thenReturn(member);
when(distributionManager.getDistributionManagerId()).thenReturn(member);
when(distributionManager.getConfig()).thenReturn(config);
when(distributionManager.getSystem()).thenReturn(system);
when(distributionManager.getCancelCriterion()).thenReturn(systemCancelCriterion);
return cache;
}
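A typical way a unit test might consume this fake (the assertions and the class under test are illustrative, not part of Fakes itself):

GemFireCacheImpl cache = Fakes.cache();
InternalDistributedSystem system = cache.getInternalDistributedSystem();

// the stubbed wiring is consistent: the same member id is returned by the cache,
// the system and the distribution manager
assertSame(cache.getMyId(), system.getDistributedMember());
assertSame(system, cache.getDistributionManager().getSystem());

// a component under test can now be handed the mock cache without
// bootstrapping a real distributed system
MyCacheService service = new MyCacheService(cache); // hypothetical class under test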