Use of org.apache.geode.cache.server.CacheServer in project geode by apache.
Class ServerLauncherTest, method testIsDefaultServerEnabledWhenCacheServersExist:
@Test
public void testIsDefaultServerEnabledWhenCacheServersExist() {
final Cache mockCache = mockContext.mock(Cache.class, "Cache");
final CacheServer mockCacheServer = mockContext.mock(CacheServer.class, "CacheServer");
mockContext.checking(new Expectations() {
{
oneOf(mockCache).getCacheServers();
will(returnValue(Collections.singletonList(mockCacheServer)));
}
});
final ServerLauncher serverLauncher = new Builder().setMemberName("serverOne").setDisableDefaultServer(false).build();
assertNotNull(serverLauncher);
assertEquals("serverOne", serverLauncher.getMemberName());
assertFalse(serverLauncher.isDisableDefaultServer());
assertFalse(serverLauncher.isDefaultServerEnabled(mockCache));
}
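For contrast, a minimal companion sketch (not taken from the project) of the opposite case: the mocked Cache reports no cache servers, so the default server is expected to be enabled. The test name, the empty-list stub, and the final assertion are assumptions inferred from the test above (assertTrue is assumed to be statically imported alongside the other assertions).
@Test
public void defaultServerEnabledWhenNoCacheServersExist_sketch() {
  final Cache mockCache = mockContext.mock(Cache.class, "EmptyCache");
  mockContext.checking(new Expectations() {
    {
      // No servers have been added yet, so the cache reports an empty list.
      oneOf(mockCache).getCacheServers();
      will(returnValue(Collections.emptyList()));
    }
  });
  final ServerLauncher serverLauncher = new Builder().setMemberName("serverTwo").setDisableDefaultServer(false).build();
  // Assumed semantics: with no existing servers and the default server not disabled, it is enabled.
  assertTrue(serverLauncher.isDefaultServerEnabled(mockCache));
}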
Use of org.apache.geode.cache.server.CacheServer in project geode by apache.
Class DeltaSizingDUnitTest, method doTest:
public void doTest(final AccessorFactory accessorFactory, final boolean clone, final boolean copyOnRead) throws InterruptedException {
final Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
SerializableCallable createDataRegion = new SerializableCallable("createRegion") {
public Object call() throws Exception {
Cache cache = getCache();
cache.setCopyOnRead(copyOnRead);
AttributesFactory attr = new AttributesFactory();
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(1);
PartitionAttributes prAttr = paf.create();
attr.setPartitionAttributes(prAttr);
attr.setCloningEnabled(clone);
// attr.setCacheWriter(new CacheWriterAdapter() {
//
// @Override
// public void beforeCreate(EntryEvent event)
// throws CacheWriterException {
// assertTrue(event.getOldValue() == null);
// assertTrue(event.getNewValue() instanceof MyClass);
// }
//
// @Override
// public void beforeUpdate(EntryEvent event)
// throws CacheWriterException {
// assertTrue(event.getOldValue() instanceof MyClass);
// assertTrue(event.getNewValue() instanceof MyClass);
// assertEquals(event.getOldValue(), event.getNewValue());
// }
//
// });
cache.createRegion("region1", attr.create());
CacheServer server = cache.addCacheServer();
int port = AvailablePortHelper.getRandomAvailableTCPPort();
server.setPort(port);
server.start();
return Integer.valueOf(port);
}
};
final Integer port1 = (Integer) vm0.invoke(createDataRegion);
final Integer port2 = (Integer) vm1.invoke(createDataRegion);
SerializableRunnable createEmptyRegion = new SerializableRunnable("createRegion") {
public void run() {
Cache cache = getCache();
cache.setCopyOnRead(copyOnRead);
Region<Integer, TestDelta> region = accessorFactory.createRegion(host, cache, port1.intValue(), port2.intValue());
// This call just creates a bucket. We do an extra serialization on entries that trigger
// bucket creation. That's a bug that should get fixed, but for now it's throwing off my
// assertions. So I'll force the creation of the bucket
region.put(new Integer(113), new TestDelta(false, "bogus"));
// Now put an entry in that we will modify
region.put(new Integer(0), new TestDelta(false, "initial"));
}
};
vm2.invoke(createEmptyRegion);
int clones = 0;
// Get the object size in both VMS
long size = checkObjects(vm0, 1, 1, 0, clones);
assertEquals(size, checkObjects(vm1, 1, 1, 0, clones));
// Now apply a delta
vm2.invoke(new SerializableRunnable("update") {
public void run() {
Cache cache = getCache();
Region<Object, TestDelta> region = cache.getRegion("region1");
region.put(new Integer(0), new TestDelta(true, "changedAAAAAAAA"));
}
});
clones = 0;
if (copyOnRead) {
// 1 clone to read the object when we test it (the object should be in deserialized form)
clones += 1;
} else if (clone) {
// 1 clone copy the object when we modify it (the object should be in serialized form)
clones += 1;
}
// Check to make sure the size hasn't changed
assertEquals(size, checkObjects(vm0, 1, 1, 1, clones));
assertEquals(size, checkObjects(vm1, 1, 1, 1, clones));
// Try another
vm2.invoke(new SerializableRunnable("update") {
public void run() {
Cache cache = getCache();
Region<Object, TestDelta> region = cache.getRegion("region1");
region.put(new Integer(0), new TestDelta(true, "changedBBBBBBB"));
}
});
if (clone || copyOnRead) {
// 1 clone to copy the object when we apply the delta.
clones += 1;
}
if (copyOnRead) {
// 1 clone to read the object when we test it
clones += 1;
}
// Check to make sure the size hasn't changed
assertEquals(size, checkObjects(vm0, 1, 1, 2, clones));
assertEquals(size, checkObjects(vm1, 1, 1, 2, clones));
}
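The server-side setup embedded in createDataRegion above reduces to a small reusable pattern; a minimal sketch, assuming an already-created Cache and the AvailablePortHelper test utility used in this test:
private static int startCacheServer(Cache cache) throws IOException {
  // Add a CacheServer to the existing cache, bind it to a random free port, and start it.
  CacheServer server = cache.addCacheServer();
  int port = AvailablePortHelper.getRandomAvailableTCPPort();
  server.setPort(port);
  server.start();
  return port;
}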
Use of org.apache.geode.cache.server.CacheServer in project geode by apache.
Class DeltaPropagationStatsDUnitTest, method createServerCache:
public static Integer createServerCache(Boolean flag, DataPolicy policy, Scope scope, Boolean listener) throws Exception {
ConnectionTable.threadWantsSharedResources();
DeltaPropagationStatsDUnitTest test = new DeltaPropagationStatsDUnitTest();
Properties props = new Properties();
if (!flag) {
props.setProperty(DELTA_PROPAGATION, "false");
}
cache = test.createCache(props);
AttributesFactory factory = new AttributesFactory();
factory.setScope(scope);
factory.setDataPolicy(policy);
if (listener) {
factory.addCacheListener(new CacheListenerAdapter() {
public void afterCreate(EntryEvent event) {
if (event.getKey().equals(LAST_KEY)) {
lastKeyReceived = true;
}
}
});
}
Region region = cache.createRegion(REGION_NAME, factory.create());
if (!policy.isReplicate()) {
region.create("KEY", "KEY");
}
region.getAttributesMutator().setCloningEnabled(false);
CacheServer server = cache.addCacheServer();
int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
server.setPort(port);
server.setNotifyBySubscription(true);
server.start();
return server.getPort();
}
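A hedged sketch (not part of this test) of a client cache that could connect to the server created above; the host argument is a placeholder and the region access pattern is an assumption. It uses the public ClientCacheFactory API, with a subscription-enabled pool to mirror server.setNotifyBySubscription(true):
public static void createClientCache_sketch(String host, Integer port) {
  // Pool with subscriptions enabled so the client receives server-side create events.
  ClientCache clientCache = new ClientCacheFactory()
      .addPoolServer(host, port)
      .setPoolSubscriptionEnabled(true)
      .create();
  Region<Object, Object> region = clientCache
      .createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
      .create(REGION_NAME);
  // Register interest so creates such as LAST_KEY are delivered to the client.
  region.registerInterest("ALL_KEYS");
}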
Use of org.apache.geode.cache.server.CacheServer in project geode by apache.
Class GridAdvisorDUnitTest, method test2by2usingGroups:
@Test
public void test2by2usingGroups() throws Exception {
disconnectAllFromDS();
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
VM vm3 = host.getVM(3);
List<Keeper> freeTCPPorts = AvailablePortHelper.getRandomAvailableTCPPortKeepers(6);
final Keeper keeper1 = freeTCPPorts.get(0);
final int port1 = keeper1.getPort();
final Keeper keeper2 = freeTCPPorts.get(1);
final int port2 = keeper2.getPort();
final Keeper bsKeeper1 = freeTCPPorts.get(2);
final int bsPort1 = bsKeeper1.getPort();
final Keeper bsKeeper2 = freeTCPPorts.get(3);
final int bsPort2 = bsKeeper2.getPort();
final Keeper bsKeeper3 = freeTCPPorts.get(4);
final int bsPort3 = bsKeeper3.getPort();
final Keeper bsKeeper4 = freeTCPPorts.get(5);
final int bsPort4 = bsKeeper4.getPort();
final String host0 = NetworkUtils.getServerHostName(host);
final String locators = host0 + "[" + port1 + "]" + "," + host0 + "[" + port2 + "]";
final Properties dsProps = new Properties();
dsProps.setProperty(LOCATORS, locators);
dsProps.setProperty(MCAST_PORT, "0");
dsProps.setProperty(LOG_LEVEL, LogWriterUtils.getDUnitLogLevel());
dsProps.setProperty(ENABLE_CLUSTER_CONFIGURATION, "false");
keeper1.release();
vm0.invoke(new SerializableRunnable("Start locators on " + port1) {
public void run() {
File logFile = new File(getUniqueName() + "-locator" + port1 + ".log");
try {
Locator.startLocatorAndDS(port1, logFile, null, dsProps, true, true, null);
} catch (IOException ex) {
Assert.fail("While starting locator on port " + port1, ex);
}
}
});
// try { Thread.currentThread().sleep(4000); } catch (InterruptedException ie) { }
keeper2.release();
vm3.invoke(new SerializableRunnable("Start locators on " + port2) {
public void run() {
File logFile = new File(getUniqueName() + "-locator" + port2 + ".log");
try {
Locator.startLocatorAndDS(port2, logFile, null, dsProps, true, true, "locator2HNFC");
} catch (IOException ex) {
Assert.fail("While starting locator on port " + port2, ex);
}
}
});
vm1.invoke(new SerializableRunnable("Connect to " + locators) {
public void run() {
Properties props = new Properties();
props.setProperty(MCAST_PORT, "0");
props.setProperty(LOCATORS, locators);
props.setProperty(GROUPS, "bs1Group1, bs1Group2");
props.setProperty(LOG_LEVEL, LogWriterUtils.getDUnitLogLevel());
CacheFactory.create(DistributedSystem.connect(props));
}
});
vm2.invoke(new SerializableRunnable("Connect to " + locators) {
public void run() {
Properties props = new Properties();
props.setProperty(MCAST_PORT, "0");
props.setProperty(LOCATORS, locators);
props.setProperty(GROUPS, "bs2Group1, bs2Group2");
props.setProperty(LOG_LEVEL, LogWriterUtils.getDUnitLogLevel());
CacheFactory.create(DistributedSystem.connect(props));
}
});
SerializableRunnable startBS1 = new SerializableRunnable("start bridgeServer on " + bsPort1) {
public void run() {
try {
Cache c = CacheFactory.getAnyInstance();
CacheServer bs = c.addCacheServer();
bs.setPort(bsPort1);
bs.start();
} catch (IOException ex) {
RuntimeException re = new RuntimeException();
re.initCause(ex);
throw re;
}
}
};
SerializableRunnable startBS3 = new SerializableRunnable("start bridgeServer on " + bsPort3) {
public void run() {
try {
Cache c = CacheFactory.getAnyInstance();
CacheServer bs = c.addCacheServer();
bs.setPort(bsPort3);
bs.start();
} catch (IOException ex) {
RuntimeException re = new RuntimeException();
re.initCause(ex);
throw re;
}
}
};
bsKeeper1.release();
vm1.invoke(startBS1);
bsKeeper3.release();
vm1.invoke(startBS3);
bsKeeper2.release();
vm2.invoke(new SerializableRunnable("start bridgeServer on " + bsPort2) {
public void run() {
try {
Cache c = CacheFactory.getAnyInstance();
CacheServer bs = c.addCacheServer();
bs.setPort(bsPort2);
bs.start();
} catch (IOException ex) {
RuntimeException re = new RuntimeException();
re.initCause(ex);
throw re;
}
}
});
bsKeeper4.release();
vm2.invoke(new SerializableRunnable("start bridgeServer on " + bsPort4) {
public void run() {
try {
Cache c = CacheFactory.getAnyInstance();
CacheServer bs = c.addCacheServer();
bs.setPort(bsPort4);
bs.start();
} catch (IOException ex) {
RuntimeException re = new RuntimeException();
re.initCause(ex);
throw re;
}
}
});
// verify that locators know about each other
vm0.invoke(new SerializableRunnable("Verify other locator on " + port2) {
public void run() {
assertTrue(Locator.hasLocator());
InternalLocator l = (InternalLocator) Locator.getLocator();
DistributionAdvisee advisee = l.getServerLocatorAdvisee();
ControllerAdvisor ca = (ControllerAdvisor) advisee.getDistributionAdvisor();
List others = ca.fetchControllers();
assertEquals(1, others.size());
{
ControllerAdvisor.ControllerProfile cp = (ControllerAdvisor.ControllerProfile) others.get(0);
assertEquals(port2, cp.getPort());
assertEquals("locator2HNFC", cp.getHost());
}
others = ca.fetchBridgeServers();
assertEquals(4, others.size());
for (int j = 0; j < others.size(); j++) {
CacheServerAdvisor.CacheServerProfile bsp = (CacheServerAdvisor.CacheServerProfile) others.get(j);
if (bsp.getPort() == bsPort1) {
assertEquals(Arrays.asList(new String[] { "bs1Group1", "bs1Group2" }), Arrays.asList(bsp.getGroups()));
} else if (bsp.getPort() == bsPort2) {
assertEquals(Arrays.asList(new String[] { "bs2Group1", "bs2Group2" }), Arrays.asList(bsp.getGroups()));
} else if (bsp.getPort() == bsPort3) {
assertEquals(Arrays.asList(new String[] { "bs1Group1", "bs1Group2" }), Arrays.asList(bsp.getGroups()));
} else if (bsp.getPort() == bsPort4) {
assertEquals(Arrays.asList(new String[] { "bs2Group1", "bs2Group2" }), Arrays.asList(bsp.getGroups()));
} else {
fail("unexpected port " + bsp.getPort() + " in " + bsp);
}
}
}
});
vm3.invoke(new SerializableRunnable("Verify other locator on " + port1) {
public void run() {
assertTrue(Locator.hasLocator());
InternalLocator l = (InternalLocator) Locator.getLocator();
DistributionAdvisee advisee = l.getServerLocatorAdvisee();
ControllerAdvisor ca = (ControllerAdvisor) advisee.getDistributionAdvisor();
List others = ca.fetchControllers();
assertEquals(1, others.size());
{
ControllerAdvisor.ControllerProfile cp = (ControllerAdvisor.ControllerProfile) others.get(0);
assertEquals(port1, cp.getPort());
}
others = ca.fetchBridgeServers();
assertEquals(4, others.size());
for (int j = 0; j < others.size(); j++) {
CacheServerAdvisor.CacheServerProfile bsp = (CacheServerAdvisor.CacheServerProfile) others.get(j);
if (bsp.getPort() == bsPort1) {
assertEquals(Arrays.asList(new String[] { "bs1Group1", "bs1Group2" }), Arrays.asList(bsp.getGroups()));
} else if (bsp.getPort() == bsPort2) {
assertEquals(Arrays.asList(new String[] { "bs2Group1", "bs2Group2" }), Arrays.asList(bsp.getGroups()));
} else if (bsp.getPort() == bsPort3) {
assertEquals(Arrays.asList(new String[] { "bs1Group1", "bs1Group2" }), Arrays.asList(bsp.getGroups()));
} else if (bsp.getPort() == bsPort4) {
assertEquals(Arrays.asList(new String[] { "bs2Group1", "bs2Group2" }), Arrays.asList(bsp.getGroups()));
} else {
fail("unexpected port " + bsp.getPort() + " in " + bsp);
}
}
}
});
vm1.invoke(new SerializableRunnable("Verify bridge server view on " + bsPort1 + " and on " + bsPort3) {
public void run() {
Cache c = CacheFactory.getAnyInstance();
List bslist = c.getCacheServers();
assertEquals(2, bslist.size());
for (int i = 0; i < bslist.size(); i++) {
DistributionAdvisee advisee = (DistributionAdvisee) bslist.get(i);
CacheServerAdvisor bsa = (CacheServerAdvisor) advisee.getDistributionAdvisor();
List others = bsa.fetchBridgeServers();
LogWriterUtils.getLogWriter().info("found these bridgeservers in " + advisee + ": " + others);
assertEquals(3, others.size());
others = bsa.fetchControllers();
assertEquals(2, others.size());
for (int j = 0; j < others.size(); j++) {
ControllerAdvisor.ControllerProfile cp = (ControllerAdvisor.ControllerProfile) others.get(j);
if (cp.getPort() == port1) {
// ok
} else if (cp.getPort() == port2) {
assertEquals("locator2HNFC", cp.getHost());
// ok
} else {
fail("unexpected port " + cp.getPort() + " in " + cp);
}
}
}
}
});
vm2.invoke(new SerializableRunnable("Verify bridge server view on " + bsPort2 + " and on " + bsPort4) {
public void run() {
Cache c = CacheFactory.getAnyInstance();
List bslist = c.getCacheServers();
assertEquals(2, bslist.size());
for (int i = 0; i < bslist.size(); i++) {
DistributionAdvisee advisee = (DistributionAdvisee) bslist.get(i);
CacheServerAdvisor bsa = (CacheServerAdvisor) advisee.getDistributionAdvisor();
List others = bsa.fetchBridgeServers();
LogWriterUtils.getLogWriter().info("found these bridgeservers in " + advisee + ": " + others);
assertEquals(3, others.size());
others = bsa.fetchControllers();
assertEquals(2, others.size());
for (int j = 0; j < others.size(); j++) {
ControllerAdvisor.ControllerProfile cp = (ControllerAdvisor.ControllerProfile) others.get(j);
if (cp.getPort() == port1) {
// ok
} else if (cp.getPort() == port2) {
assertEquals("locator2HNFC", cp.getHost());
// ok
} else {
fail("unexpected port " + cp.getPort() + " in " + cp);
}
}
}
}
});
SerializableRunnable stopBS = new SerializableRunnable("stop bridge server") {
public void run() {
Cache c = CacheFactory.getAnyInstance();
List bslist = c.getCacheServers();
assertEquals(2, bslist.size());
CacheServer bs = (CacheServer) bslist.get(0);
bs.stop();
}
};
vm1.invoke(stopBS);
// now check to see if everyone else noticed him going away
vm0.invoke(new SerializableRunnable("Verify other locator on " + port2) {
public void run() {
assertTrue(Locator.hasLocator());
InternalLocator l = (InternalLocator) Locator.getLocator();
DistributionAdvisee advisee = l.getServerLocatorAdvisee();
ControllerAdvisor ca = (ControllerAdvisor) advisee.getDistributionAdvisor();
List others = ca.fetchControllers();
assertEquals(1, others.size());
{
ControllerAdvisor.ControllerProfile cp = (ControllerAdvisor.ControllerProfile) others.get(0);
assertEquals(port2, cp.getPort());
assertEquals("locator2HNFC", cp.getHost());
}
others = ca.fetchBridgeServers();
assertEquals(3, others.size());
for (int j = 0; j < others.size(); j++) {
CacheServerAdvisor.CacheServerProfile bsp = (CacheServerAdvisor.CacheServerProfile) others.get(j);
if (bsp.getPort() == bsPort2) {
assertEquals(Arrays.asList(new String[] { "bs2Group1", "bs2Group2" }), Arrays.asList(bsp.getGroups()));
} else if (bsp.getPort() == bsPort3) {
assertEquals(Arrays.asList(new String[] { "bs1Group1", "bs1Group2" }), Arrays.asList(bsp.getGroups()));
} else if (bsp.getPort() == bsPort4) {
assertEquals(Arrays.asList(new String[] { "bs2Group1", "bs2Group2" }), Arrays.asList(bsp.getGroups()));
} else {
fail("unexpected port " + bsp.getPort() + " in " + bsp);
}
}
}
});
vm3.invoke(new SerializableRunnable("Verify other locator on " + port1) {
public void run() {
assertTrue(Locator.hasLocator());
InternalLocator l = (InternalLocator) Locator.getLocator();
DistributionAdvisee advisee = l.getServerLocatorAdvisee();
ControllerAdvisor ca = (ControllerAdvisor) advisee.getDistributionAdvisor();
List others = ca.fetchControllers();
assertEquals(1, others.size());
{
ControllerAdvisor.ControllerProfile cp = (ControllerAdvisor.ControllerProfile) others.get(0);
assertEquals(port1, cp.getPort());
}
others = ca.fetchBridgeServers();
assertEquals(3, others.size());
for (int j = 0; j < others.size(); j++) {
CacheServerAdvisor.CacheServerProfile bsp = (CacheServerAdvisor.CacheServerProfile) others.get(j);
if (bsp.getPort() == bsPort2) {
assertEquals(Arrays.asList(new String[] { "bs2Group1", "bs2Group2" }), Arrays.asList(bsp.getGroups()));
} else if (bsp.getPort() == bsPort3) {
assertEquals(Arrays.asList(new String[] { "bs1Group1", "bs1Group2" }), Arrays.asList(bsp.getGroups()));
} else if (bsp.getPort() == bsPort4) {
assertEquals(Arrays.asList(new String[] { "bs2Group1", "bs2Group2" }), Arrays.asList(bsp.getGroups()));
} else {
fail("unexpected port " + bsp.getPort() + " in " + bsp);
}
}
}
});
SerializableRunnable disconnect = new SerializableRunnable("Disconnect from " + locators) {
public void run() {
InternalDistributedSystem.getAnyInstance().disconnect();
}
};
SerializableRunnable stopLocator = new SerializableRunnable("Stop locator") {
public void run() {
assertTrue(Locator.hasLocator());
Locator.getLocator().stop();
assertFalse(Locator.hasLocator());
}
};
vm0.invoke(stopLocator);
// now make sure everyone else saw the locator go away
vm3.invoke(new SerializableRunnable("Verify locator stopped ") {
public void run() {
assertTrue(Locator.hasLocator());
InternalLocator l = (InternalLocator) Locator.getLocator();
DistributionAdvisee advisee = l.getServerLocatorAdvisee();
ControllerAdvisor ca = (ControllerAdvisor) advisee.getDistributionAdvisor();
List others = ca.fetchControllers();
assertEquals(0, others.size());
}
});
vm2.invoke(new SerializableRunnable("Verify bridge server saw locator stop") {
public void run() {
Cache c = CacheFactory.getAnyInstance();
List bslist = c.getCacheServers();
assertEquals(2, bslist.size());
for (int i = 0; i < bslist.size(); i++) {
DistributionAdvisee advisee = (DistributionAdvisee) bslist.get(i);
CacheServerAdvisor bsa = (CacheServerAdvisor) advisee.getDistributionAdvisor();
List others = bsa.fetchControllers();
assertEquals(1, others.size());
for (int j = 0; j < others.size(); j++) {
ControllerAdvisor.ControllerProfile cp = (ControllerAdvisor.ControllerProfile) others.get(j);
if (cp.getPort() == port2) {
assertEquals("locator2HNFC", cp.getHost());
// ok
} else {
fail("unexpected port " + cp.getPort() + " in " + cp);
}
}
}
}
});
vm1.invoke(new SerializableRunnable("Verify bridge server saw locator stop") {
public void run() {
Cache c = CacheFactory.getAnyInstance();
List bslist = c.getCacheServers();
assertEquals(2, bslist.size());
for (int i = 0; i < bslist.size(); i++) {
DistributionAdvisee advisee = (DistributionAdvisee) bslist.get(i);
if (i == 0) {
// skip this one since it is stopped
continue;
}
CacheServerAdvisor bsa = (CacheServerAdvisor) advisee.getDistributionAdvisor();
List others = bsa.fetchControllers();
assertEquals(1, others.size());
for (int j = 0; j < others.size(); j++) {
ControllerAdvisor.ControllerProfile cp = (ControllerAdvisor.ControllerProfile) others.get(j);
if (cp.getPort() == port2) {
assertEquals("locator2HNFC", cp.getHost());
// ok
} else {
fail("unexpected port " + cp.getPort() + " in " + cp);
}
}
}
}
});
SerializableRunnable restartBS = new SerializableRunnable("restart bridge server") {
public void run() {
try {
Cache c = CacheFactory.getAnyInstance();
List bslist = c.getCacheServers();
assertEquals(2, bslist.size());
CacheServer bs = (CacheServer) bslist.get(0);
bs.setHostnameForClients("nameForClients");
bs.start();
} catch (IOException ex) {
RuntimeException re = new RuntimeException();
re.initCause(ex);
throw re;
}
}
};
// restart bridge server 1 and see if controller sees it
vm1.invoke(restartBS);
vm3.invoke(new SerializableRunnable("Verify bridge server restart ") {
public void run() {
assertTrue(Locator.hasLocator());
InternalLocator l = (InternalLocator) Locator.getLocator();
DistributionAdvisee advisee = l.getServerLocatorAdvisee();
ControllerAdvisor ca = (ControllerAdvisor) advisee.getDistributionAdvisor();
assertEquals(0, ca.fetchControllers().size());
List others = ca.fetchBridgeServers();
assertEquals(4, others.size());
for (int j = 0; j < others.size(); j++) {
CacheServerAdvisor.CacheServerProfile bsp = (CacheServerAdvisor.CacheServerProfile) others.get(j);
if (bsp.getPort() == bsPort1) {
assertEquals(Arrays.asList(new String[] { "bs1Group1", "bs1Group2" }), Arrays.asList(bsp.getGroups()));
assertEquals("nameForClients", bsp.getHost());
} else if (bsp.getPort() == bsPort2) {
assertEquals(Arrays.asList(new String[] { "bs2Group1", "bs2Group2" }), Arrays.asList(bsp.getGroups()));
assertFalse(bsp.getHost().equals("nameForClients"));
} else if (bsp.getPort() == bsPort3) {
assertEquals(Arrays.asList(new String[] { "bs1Group1", "bs1Group2" }), Arrays.asList(bsp.getGroups()));
} else if (bsp.getPort() == bsPort4) {
assertEquals(Arrays.asList(new String[] { "bs2Group1", "bs2Group2" }), Arrays.asList(bsp.getGroups()));
} else {
fail("unexpected port " + bsp.getPort() + " in " + bsp);
}
}
}
});
vm1.invoke(disconnect);
vm2.invoke(disconnect);
// now make sure controller saw all bridge servers stop
vm3.invoke(new SerializableRunnable("Verify locator stopped ") {
public void run() {
assertTrue(Locator.hasLocator());
InternalLocator l = (InternalLocator) Locator.getLocator();
DistributionAdvisee advisee = l.getServerLocatorAdvisee();
ControllerAdvisor ca = (ControllerAdvisor) advisee.getDistributionAdvisor();
assertEquals(0, ca.fetchControllers().size());
assertEquals(0, ca.fetchBridgeServers().size());
}
});
vm3.invoke(stopLocator);
}
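In this test the server groups come from the GROUPS distributed-system property set when vm1 and vm2 connect; a minimal sketch (not part of the test) of the per-server alternative, assigning groups directly on the CacheServer with setGroups, as the next example on this page does:
SerializableRunnable startGroupedServer = new SerializableRunnable("start bridgeServer with explicit groups") {
  public void run() {
    try {
      Cache c = CacheFactory.getAnyInstance();
      CacheServer bs = c.addCacheServer();
      bs.setPort(AvailablePortHelper.getRandomAvailableTCPPort());
      // Groups set on the server itself rather than via the GROUPS system property.
      bs.setGroups(new String[] { "bs1Group1", "bs1Group2" });
      bs.start();
    } catch (IOException ex) {
      throw new RuntimeException(ex);
    }
  }
};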
Use of org.apache.geode.cache.server.CacheServer in project geode by apache.
Class PartitionedRegionSingleHopWithServerGroupDUnitTest, method createServerWithLocatorAndServerGroup2Regions:
public static int createServerWithLocatorAndServerGroup2Regions(String locator, int localMaxMemory, int redundantCopies, int totalNoofBuckets, String group) {
Properties props = new Properties();
props.setProperty(LOCATORS, locator);
System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "PoolImpl.honourServerGroupsInPRSingleHop", "true");
PartitionedRegionSingleHopWithServerGroupDUnitTest test = new PartitionedRegionSingleHopWithServerGroupDUnitTest();
DistributedSystem ds = test.getSystem(props);
cache = CacheFactory.create(ds);
CacheServer server = cache.addCacheServer();
if (group.length() != 0) {
StringTokenizer t = new StringTokenizer(group, ",");
String[] a = new String[t.countTokens()];
int i = 0;
while (t.hasMoreTokens()) {
a[i] = t.nextToken();
i++;
}
server.setGroups(a);
}
int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
server.setPort(port);
server.setHostnameForClients("localhost");
try {
server.start();
} catch (IOException e) {
Assert.fail("Failed to start server ", e);
}
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(redundantCopies).setLocalMaxMemory(localMaxMemory).setTotalNumBuckets(totalNoofBuckets);
AttributesFactory attr = new AttributesFactory();
attr.setPartitionAttributes(paf.create());
region = cache.createRegion(PR_NAME, attr.create());
assertNotNull(region);
LogWriterUtils.getLogWriter().info("Partitioned Region " + PR_NAME + " created Successfully :" + region.toString());
// creating colocated Regions
paf = new PartitionAttributesFactory();
paf.setRedundantCopies(redundantCopies).setLocalMaxMemory(localMaxMemory).setTotalNumBuckets(totalNoofBuckets).setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
attr = new AttributesFactory();
attr.setPartitionAttributes(paf.create());
customerRegion = cache.createRegion("CUSTOMER", attr.create());
assertNotNull(customerRegion);
LogWriterUtils.getLogWriter().info("Partitioned Region CUSTOMER created Successfully :" + customerRegion.toString());
paf = new PartitionAttributesFactory();
paf.setRedundantCopies(redundantCopies).setLocalMaxMemory(localMaxMemory).setTotalNumBuckets(totalNoofBuckets).setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
attr = new AttributesFactory();
attr.setPartitionAttributes(paf.create());
orderRegion = cache.createRegion("ORDER", attr.create());
assertNotNull(orderRegion);
LogWriterUtils.getLogWriter().info("Partitioned Region ORDER created Successfully :" + orderRegion.toString());
paf = new PartitionAttributesFactory();
paf.setRedundantCopies(redundantCopies).setLocalMaxMemory(localMaxMemory).setTotalNumBuckets(totalNoofBuckets).setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
attr = new AttributesFactory();
attr.setPartitionAttributes(paf.create());
shipmentRegion = cache.createRegion("SHIPMENT", attr.create());
assertNotNull(shipmentRegion);
LogWriterUtils.getLogWriter().info("Partitioned Region SHIPMENT created Successfully :" + shipmentRegion.toString());
paf = new PartitionAttributesFactory();
paf.setRedundantCopies(redundantCopies).setLocalMaxMemory(localMaxMemory).setTotalNumBuckets(totalNoofBuckets);
attr = new AttributesFactory();
attr.setPartitionAttributes(paf.create());
region2 = cache.createRegion(PR_NAME2, attr.create());
assertNotNull(region2);
LogWriterUtils.getLogWriter().info("Partitioned Region " + PR_NAME2 + " created Successfully :" + region2.toString());
paf = new PartitionAttributesFactory();
paf.setRedundantCopies(redundantCopies).setLocalMaxMemory(localMaxMemory).setTotalNumBuckets(totalNoofBuckets).setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
attr = new AttributesFactory();
attr.setPartitionAttributes(paf.create());
customerRegion2 = cache.createRegion(CUSTOMER2, attr.create());
assertNotNull(customerRegion2);
LogWriterUtils.getLogWriter().info("Partitioned Region CUSTOMER2 created Successfully :" + customerRegion2.toString());
paf = new PartitionAttributesFactory();
paf.setRedundantCopies(redundantCopies).setLocalMaxMemory(localMaxMemory).setTotalNumBuckets(totalNoofBuckets).setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
attr = new AttributesFactory();
attr.setPartitionAttributes(paf.create());
orderRegion2 = cache.createRegion(ORDER2, attr.create());
assertNotNull(orderRegion2);
LogWriterUtils.getLogWriter().info("Partitioned Region ORDER2 created Successfully :" + orderRegion2.toString());
paf = new PartitionAttributesFactory();
paf.setRedundantCopies(redundantCopies).setLocalMaxMemory(localMaxMemory).setTotalNumBuckets(totalNoofBuckets).setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
attr = new AttributesFactory();
attr.setPartitionAttributes(paf.create());
shipmentRegion2 = cache.createRegion(SHIPMENT2, attr.create());
assertNotNull(shipmentRegion2);
LogWriterUtils.getLogWriter().info("Partitioned Region SHIPMENT2 created Successfully :" + shipmentRegion2.toString());
return port;
}
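The StringTokenizer loop above can be expressed more compactly; a sketch of an equivalent helper, assuming group strings like "group1,group2" with no empty tokens (for which the behavior matches the tokenizer-based code):
private static void applyServerGroups(CacheServer server, String group) {
  if (group.length() != 0) {
    // Split the comma-separated list into the array expected by setGroups.
    server.setGroups(group.split(","));
  }
}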