use of org.apache.geode.distributed.internal.DistributionManager in project geode by apache.
the class FetchPartitionDetailsMessageTest method shouldBeMockable.
@Test
public void shouldBeMockable() throws Exception {
FetchPartitionDetailsMessage mockFetchPartitionDetailsMessage = mock(FetchPartitionDetailsMessage.class);
DistributionManager mockDistributionManager = mock(DistributionManager.class);
PartitionedRegion mockPartitionedRegion = mock(PartitionedRegion.class);
long startTime = System.currentTimeMillis();
Object key = new Object();
when(mockFetchPartitionDetailsMessage.operateOnPartitionedRegion(eq(mockDistributionManager), eq(mockPartitionedRegion), eq(startTime))).thenReturn(true);
assertThat(mockFetchPartitionDetailsMessage.operateOnPartitionedRegion(mockDistributionManager, mockPartitionedRegion, startTime)).isTrue();
}
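A minimal sketch (not part of the original test) of how the same mocks could also be verified, assuming the usual Mockito static imports (verify alongside mock, when, eq) and that these lines are appended inside shouldBeMockable after the existing assertion:
// verify the interaction the assertThat above already exercised (exactly one matching call so far)
verify(mockFetchPartitionDetailsMessage).operateOnPartitionedRegion(mockDistributionManager, mockPartitionedRegion, startTime);
// an argument the eq(startTime) stub does not match falls back to Mockito's boolean default, false
assertThat(mockFetchPartitionDetailsMessage.operateOnPartitionedRegion(mockDistributionManager, mockPartitionedRegion, startTime + 1)).isFalse();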
use of org.apache.geode.distributed.internal.DistributionManager in project geode by apache.
the class Bug41733DUnitTest method testCrashAfterBucketCreation.
/**
* Test that we can handle a member departing after creating a bucket on the remote node but before
* we choose a primary
*/
@Test
public void testCrashAfterBucketCreation() throws Throwable {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
vm0.invoke(new SerializableRunnable("Install observer") {
public void run() {
DistributionMessageObserver.setInstance(new DistributionMessageObserver() {
@Override
public void beforeProcessMessage(DistributionManager dm, DistributionMessage message) {
if (message instanceof ManageBucketReplyMessage) {
disconnectFromDS();
}
}
});
}
});
createPR(vm0, 0);
// Create a couple of buckets in VM0. This will make sure
// the next bucket we create will be created in VM 1.
putData(vm0, 0, 2, "a");
createPR(vm1, 0);
// Trigger a bucket creation in VM1, which should cause vm0 to close its cache.
try {
putData(vm0, 3, 4, "a");
fail("should have received a cache closed exception");
} catch (RMIException e) {
if (!(e.getCause() instanceof DistributedSystemDisconnectedException)) {
throw e;
}
}
assertEquals(Collections.singleton(3), getBucketList(vm1));
// This shouldn't hang, because the bucket creation should finish.
putData(vm1, 3, 4, "a");
}
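The snippet above installs a DistributionMessageObserver in vm0 but, within what is shown, never removes it. A minimal cleanup sketch, assuming that passing null to DistributionMessageObserver.setInstance clears the hook (a common reset idiom in geode dunit tests):
vm0.invoke(new SerializableRunnable("Reset observer") {
  public void run() {
    // assumption: setInstance(null) removes the previously installed observer
    DistributionMessageObserver.setInstance(null);
  }
});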
use of org.apache.geode.distributed.internal.DistributionManager in project geode by apache.
the class PdxSerializableDUnitTest method testVmWaitsForPdxType.
@Test
public void testVmWaitsForPdxType() throws Throwable {
VM vm0 = Host.getHost(0).getVM(0);
VM vm1 = Host.getHost(0).getVM(1);
getBlackboard().initBlackboard();
final Properties properties = getDistributedSystemProperties();
properties.put("conserve-sockets", "false");
// steps:
// 1 create two caches and define a PdxType
// 2 install a block in VM1 that delays receipt of new PDX types
// 3 update the value of the PdxInstance in VM0 using a new Enum type
// 4 get the value in VM0
// The result should be that step 4 hangs unless the bug is fixed
vm0.invoke("create cache", () -> {
Cache cache = getCache(properties);
Region region = cache.createRegionFactory(RegionShortcut.REPLICATE).create("testRegion");
region.put("TestObject", new TestPdxObject("aString", 1, 1.0, TestPdxObject.AnEnum.ONE));
});
vm1.invoke("create cache and region", () -> {
Cache cache = getCache(properties);
// note that initial image transfer in testRegion will cause the object to be serialized in vm0
// and populate the PdxRegion in this vm
cache.createRegionFactory(RegionShortcut.REPLICATE).create("testRegion");
// this message observer will ensure that a new PDX registration doesn't occur
final DUnitBlackboard bb = getBlackboard();
DistributionMessageObserver.setInstance(new DistributionMessageObserver() {
@Override
public void beforeProcessMessage(DistributionManager dm, DistributionMessage msg) {
if (msg instanceof DistributedCacheOperation.CacheOperationMessage) {
try {
DistributedCacheOperation.CacheOperationMessage cmsg = (DistributedCacheOperation.CacheOperationMessage) msg;
String path = cmsg.getRegionPath();
if (path.equals(PeerTypeRegistration.REGION_FULL_PATH)) {
System.out.println("message observer found a PDX update message and is stalling: " + msg);
try {
bb.signalGate("listenerWasInvoked");
bb.waitForGate("pdxObjectGetStarting", 3, TimeUnit.SECONDS);
// let the get() commence and block
Thread.sleep(30000);
System.out.println("message observer after sleep ");
} catch (InterruptedException e) {
System.out.println("message observer is done stalling1 ");
bb.setMailbox("listenerProblem", e);
} catch (TimeoutException e) {
System.out.println("message observer is done stalling2");
bb.setMailbox("listenerProblem", e);
} finally {
System.out.println("message observer is done stalling");
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
});
});
AsyncInvocation async0 = vm0.invokeAsync("propagate value with new pdx enum type", () -> {
Cache cache = getCache(properties);
final Region pdxRegion = cache.getRegion(PeerTypeRegistration.REGION_FULL_PATH);
final DUnitBlackboard bb = getBlackboard();
// now we register a new Id for our enum in a different thread. This will
// block in vm1 due to its message observer
Thread t = new Thread("PdxSerializableDUnitTest async thread") {
public void run() {
bb.signalGate("asyncThreadReady");
// pdxRegion.put(new EnumId(0x3010101), new EnumInfo(TestPdxObject.AnEnum.TWO));
((GemFireCacheImpl) cache).getPdxRegistry().addRemoteEnum(0x3010101, new EnumInfo(TestPdxObject.AnEnum.TWO));
}
};
t.setDaemon(true);
t.start();
try {
bb.waitForGate("asyncThreadReady", 20, TimeUnit.SECONDS);
} catch (TimeoutException e) {
fail("timed out");
}
// reserialization will use the new Enumeration PDX type
Region region = cache.getRegion("testRegion");
bb.waitForGate("listenerWasInvoked", 20, TimeUnit.SECONDS);
region.put("TestObject", new TestPdxObject("TestObject'", 2, 2.0, TestPdxObject.AnEnum.TWO));
System.out.println("TestObject added put");
bb.signalGate("pdxObjectPut");
});
// vm0 has sent a new TestObject but vm1 does not have the enum type needed to
// deserialize it.
AsyncInvocation async1 = vm1.invokeAsync("try to read object w/o enum type", () -> {
DUnitBlackboard bb = getBlackboard();
bb.waitForGate("pdxObjectPut", 10, TimeUnit.SECONDS);
Region region = getCache(properties).getRegion("testRegion");
bb.signalGate("pdxObjectGetStarting");
Object testObject = region.get("TestObject");
System.out.println("found " + testObject);
});
DUnitBlackboard bb = getBlackboard();
try {
async0.join(20000);
async1.join(10000);
if (async0.exceptionOccurred()) {
throw async0.getException();
}
if (async1.exceptionOccurred()) {
throw async1.getException();
}
assertTrue(bb.isGateSignaled("listenerWasInvoked"));
/*
* Throwable throwable = (Throwable)bb.getMailbox("listenerProblem"); if (throwable != null) {
* RuntimeException rte = new RuntimeException("message observer had a problem", throwable);
* throw rte; }
*/
} finally {
bb.signalGate("pdxObjectGetStarting");
bb.signalGate("pdxObjectPut");
bb.initBlackboard();
}
}
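The three invocations above coordinate purely through named blackboard gates. A compact sketch of that handshake in isolation, reusing only the DUnitBlackboard calls already present in the test and assuming it runs inside a test method declared throws Exception:
DUnitBlackboard bb = getBlackboard();
bb.initBlackboard();                                        // reset all gates and mailboxes between runs
bb.signalGate("listenerWasInvoked");                        // one VM raises a named gate
bb.waitForGate("listenerWasInvoked", 3, TimeUnit.SECONDS);  // another VM blocks until the gate is raised (or times out)
assertTrue(bb.isGateSignaled("listenerWasInvoked"));        // non-blocking check, e.g. from the driving test
bb.setMailbox("listenerProblem", new RuntimeException("example")); // optionally hand an object across VMs by name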
use of org.apache.geode.distributed.internal.DistributionManager in project geode by apache.
the class LocatorDUnitTest method testLeadAndCoordFailure.
/**
* test lead member and coordinator failure with network partition detection enabled. It would be
* nice for this test to have more than two "server" vms, to demonstrate that they all exit when
* the leader and potential coordinator both disappear in the loss-correlation-window, but there
* are only four vms available for dunit testing.
* <p>
* So, we start two locators with admin distributed systems, then start two regular distributed
* members.
* <p>
* We kill the second locator (which is not the view coordinator) and then kill the non-lead
* member. That should be okay - the lead and remaining locator continue to run.
* <p>
* We then kill the lead member and demonstrate that the original locator (which is now the sole
* remaining member) shuts itself down.
*/
@Test
public void testLeadAndCoordFailure() throws Exception {
IgnoredException.addIgnoredException("Possible loss of quorum due");
disconnectAllFromDS();
Host host = Host.getHost(0);
VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
VM locvm = host.getVM(3);
Locator locator = null;
int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
final int port1 = ports[0];
this.port1 = port1;
final int port2 = ports[1];
DistributedTestUtils.deleteLocatorStateFile(port1, port2);
final String host0 = NetworkUtils.getServerHostName(host);
final String locators = host0 + "[" + port1 + "]," + host0 + "[" + port2 + "]";
final Properties properties = new Properties();
properties.put(MCAST_PORT, "0");
properties.put(LOCATORS, locators);
properties.put(ENABLE_NETWORK_PARTITION_DETECTION, "true");
properties.put(DISABLE_AUTO_RECONNECT, "true");
properties.put(MEMBER_TIMEOUT, "2000");
properties.put(LOG_LEVEL, LogWriterUtils.getDUnitLogLevel());
// properties.put("log-level", "fine");
properties.put(ENABLE_CLUSTER_CONFIGURATION, "false");
addDSProps(properties);
try {
final String uname = getUniqueName();
File logFile = new File("");
locator = Locator.startLocatorAndDS(port1, logFile, properties);
final DistributedSystem sys = locator.getDistributedSystem();
sys.getLogWriter().info("<ExpectedException action=add>java.net.ConnectException</ExpectedException>");
MembershipManagerHelper.inhibitForcedDisconnectLogging(true);
locvm.invoke(new SerializableRunnable() {
public void run() {
File lf = new File("");
try {
Locator.startLocatorAndDS(port2, lf, properties);
} catch (IOException ios) {
org.apache.geode.test.dunit.Assert.fail("Unable to start locator2", ios);
}
}
});
Object[] connectArgs = new Object[] { properties };
SerializableRunnable crashLocator = new SerializableRunnable("Crash locator") {
public void run() {
Locator loc = Locator.getLocators().iterator().next();
DistributedSystem msys = loc.getDistributedSystem();
MembershipManagerHelper.crashDistributedSystem(msys);
loc.stop();
}
};
assertTrue(MembershipManagerHelper.getLeadMember(sys) == null);
// properties.put("log-level", getDUnitLogLevel());
DistributedMember mem1 = (DistributedMember) vm1.invoke(this.getClass(), "getDistributedMember", connectArgs);
vm2.invoke(this.getClass(), "getDistributedMember", connectArgs);
assertLeadMember(mem1, sys, 5000);
assertEquals(sys.getDistributedMember(), MembershipManagerHelper.getCoordinator(sys));
// crash the second vm and the locator. Should be okay
DistributedTestUtils.crashDistributedSystem(vm2);
locvm.invoke(crashLocator);
assertTrue("Distributed system should not have disconnected", vm1.invoke(() -> LocatorDUnitTest.isSystemConnected()));
// ensure quorumLost is properly invoked
DistributionManager dm = (DistributionManager) ((InternalDistributedSystem) sys).getDistributionManager();
MyMembershipListener listener = new MyMembershipListener();
dm.addMembershipListener(listener);
// ensure there is an unordered reader thread for the member
new HighPriorityAckedMessage().send(Collections.singleton(mem1), false);
// disconnect the first vm and demonstrate that the third vm and the
// locator notice the failure and exit
DistributedTestUtils.crashDistributedSystem(vm1);
/*
* This vm is watching vm1, which is watching vm2 which is watching locvm. It will take 3 * (3
* * member-timeout) milliseconds to detect the full failure and eject the lost members from
* the view.
*/
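// With member-timeout set to 2000 ms above, 3 * (3 * member-timeout) works out to roughly 18000 ms,
// so the 24000 ms Awaitility bound below allows for that detection time plus some slack.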
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("waiting for my distributed system to disconnect due to partition detection");
Awaitility.waitAtMost(24000, TimeUnit.MILLISECONDS).pollInterval(200, TimeUnit.MILLISECONDS).until(() -> {
return !sys.isConnected();
});
if (sys.isConnected()) {
fail("Distributed system did not disconnect as expected - network partition detection is broken");
}
// quorumLost should be invoked if we get a ForcedDisconnect in this situation
assertTrue("expected quorumLost to be invoked", listener.quorumLostInvoked);
assertTrue("expected suspect processing initiated by TCPConduit", listener.suspectReasons.contains(Connection.INITIATING_SUSPECT_PROCESSING));
} finally {
if (locator != null) {
locator.stop();
}
LogWriter bLogger = new LocalLogWriter(InternalLogWriter.ALL_LEVEL, System.out);
bLogger.info("<ExpectedException action=remove>service failure</ExpectedException>");
bLogger.info("<ExpectedException action=remove>java.net.ConnectException</ExpectedException>");
bLogger.info("<ExpectedException action=remove>org.apache.geode.ForcedDisconnectException</ExpectedException>");
disconnectAllFromDS();
}
}
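For reference, a minimal sketch of the configuration this scenario depends on, using the same constants and values as the test above; the inline notes on what each property does are interpretive rather than taken from the test:
Properties p = new Properties();
p.put(MCAST_PORT, "0");                              // locator-based discovery only, no multicast
p.put(LOCATORS, locators);                           // list both locators so either can act as coordinator
p.put(ENABLE_NETWORK_PARTITION_DETECTION, "true");   // members that lose quorum force-disconnect themselves
p.put(DISABLE_AUTO_RECONNECT, "true");               // keep ejected members down so the test can observe the shutdown
p.put(MEMBER_TIMEOUT, "2000");                       // short failure-detection timeout so the test completes quickly
p.put(ENABLE_CLUSTER_CONFIGURATION, "false");        // not needed for this membership-only scenario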
use of org.apache.geode.distributed.internal.DistributionManager in project geode by apache.
the class HostedLocatorsDUnitTest method testGetAllHostedLocators.
@Test
public void testGetAllHostedLocators() throws Exception {
final InternalDistributedSystem system = getSystem();
final String dunitLocator = system.getConfig().getLocators();
assertNotNull(dunitLocator);
assertFalse(dunitLocator.isEmpty());
final int[] ports = getRandomAvailableTCPPorts(4);
final String uniqueName = getUniqueName();
for (int i = 0; i < 4; i++) {
final int whichvm = i;
getHost(0).getVM(whichvm).invoke(new SerializableCallable() {
@Override
public Object call() throws Exception {
try {
System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "locators", dunitLocator);
System.setProperty(DistributionConfig.GEMFIRE_PREFIX + MCAST_PORT, "0");
final String name = uniqueName + "-" + whichvm;
final File subdir = new File(name);
subdir.mkdir();
assertTrue(subdir.exists() && subdir.isDirectory());
final Builder builder = new Builder().setMemberName(name).setPort(ports[whichvm]).setRedirectOutput(true).setWorkingDirectory(name);
launcher = builder.build();
assertEquals(Status.ONLINE, launcher.start().getStatus());
waitForLocatorToStart(launcher, TIMEOUT_MILLISECONDS, 10, true);
return null;
} finally {
System.clearProperty(DistributionConfig.GEMFIRE_PREFIX + "locators");
System.clearProperty(DistributionConfig.GEMFIRE_PREFIX + MCAST_PORT);
}
}
});
}
final String host = SocketCreator.getLocalHost().getHostAddress();
final Set<String> locators = new HashSet<String>();
locators.add(host + "[" + dunitLocator.substring(dunitLocator.indexOf("[") + 1, dunitLocator.indexOf("]")) + "]");
for (int port : ports) {
locators.add(host + "[" + port + "]");
}
// validation within non-locator
final DistributionManager dm = (DistributionManager) system.getDistributionManager();
final Set<InternalDistributedMember> locatorIds = dm.getLocatorDistributionManagerIds();
assertEquals(5, locatorIds.size());
final Map<InternalDistributedMember, Collection<String>> hostedLocators = dm.getAllHostedLocators();
assertTrue(!hostedLocators.isEmpty());
assertEquals(5, hostedLocators.size());
for (InternalDistributedMember member : hostedLocators.keySet()) {
assertEquals(1, hostedLocators.get(member).size());
final String hostedLocator = hostedLocators.get(member).iterator().next();
assertTrue(locators + " does not contain " + hostedLocator, locators.contains(hostedLocator));
}
// validate fix for #46324
for (int whichvm = 0; whichvm < 4; whichvm++) {
getHost(0).getVM(whichvm).invoke(new SerializableRunnable() {
@Override
public void run() {
final DistributionManager dm = (DistributionManager) InternalDistributedSystem.getAnyInstance().getDistributionManager();
final InternalDistributedMember self = dm.getDistributionManagerId();
final Set<InternalDistributedMember> locatorIds = dm.getLocatorDistributionManagerIds();
assertTrue(locatorIds.contains(self));
final Map<InternalDistributedMember, Collection<String>> hostedLocators = dm.getAllHostedLocators();
assertTrue("hit bug #46324: " + hostedLocators + " is missing " + InternalLocator.getLocatorStrings() + " for " + self, hostedLocators.containsKey(self));
}
});
}
// validation with locators
for (int whichvm = 0; whichvm < 4; whichvm++) {
getHost(0).getVM(whichvm).invoke(new SerializableRunnable() {
@Override
public void run() {
final DistributionManager dm = (DistributionManager) InternalDistributedSystem.getAnyInstance().getDistributionManager();
final Set<InternalDistributedMember> locatorIds = dm.getLocatorDistributionManagerIds();
assertEquals(5, locatorIds.size());
final Map<InternalDistributedMember, Collection<String>> hostedLocators = dm.getAllHostedLocators();
assertTrue(!hostedLocators.isEmpty());
assertEquals(5, hostedLocators.size());
for (InternalDistributedMember member : hostedLocators.keySet()) {
assertEquals(1, hostedLocators.get(member).size());
final String hostedLocator = hostedLocators.get(member).iterator().next();
assertTrue(locators + " does not contain " + hostedLocator, locators.contains(hostedLocator));
}
}
});
}
}
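The expected count of 5 in the assertions above is the four locators launched in this test plus the dunit locator the framework already runs. A small sketch of the same membership query from any connected member, using only the internal APIs the test itself relies on:
// enumerate locator members and the "host[port]" strings they advertise
DistributionManager dm = (DistributionManager) InternalDistributedSystem.getAnyInstance().getDistributionManager();
Set<InternalDistributedMember> locatorIds = dm.getLocatorDistributionManagerIds();
Map<InternalDistributedMember, Collection<String>> hosted = dm.getAllHostedLocators();
for (InternalDistributedMember member : locatorIds) {
  System.out.println(member + " hosts " + hosted.get(member));
}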