Use of org.apache.geode.cache.server.CacheServer in project geode by apache.
From class PdxLocalQueryDUnitTest, method testLocalPdxQueriesOnPR.
@Test
public void testLocalPdxQueriesOnPR() throws Exception {
final Host host = Host.getHost(0);
final VM server1 = host.getVM(0);
final VM server2 = host.getVM(1);
final VM client = host.getVM(2);
final int numberOfEntries = 10;
final String name = "/" + regionName;
final String[] queries = {
  "select * from " + name + " where position1 = $1",
  "select * from " + name + " where aDay = $1",
  "select distinct * from " + name + " p where p.status = 'inactive'", // numberOfEntries
  "select distinct p.status from " + name + " p where p.status = 'inactive'", // 1
  "select p from " + name + " p where p.status = 'inactive'", // numberOfEntries
  "select * from " + name + " p, p.positions.values v where v.secId = 'IBM'", // 4
  "select v from " + name + " p, p.positions.values v where v.secId = 'IBM'", // 4
  "select p.status from " + name + " p where p.status = 'inactive'", // numberOfEntries
  "select distinct * from " + name + " p where p.status = 'inactive' order by p.ID", // numberOfEntries
  "select * from " + name + " p where p.status = 'inactive' or p.ID > 0", // 19
  "select * from " + name + " p where p.status = 'inactive' and p.ID >= 0", // numberOfEntries
  "select * from " + name + " p where p.status in set ('inactive', 'active')", // numberOfEntries*2
  "select * from " + name + " p where p.ID > 0 and p.ID < 10", // 9
  "select v from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries*2
  "select v.secId from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries*2
  "select distinct p from " + name + " p, p.positions.values v where p.status = 'inactive' and v.pid >= 0", // numberOfEntries
  "select distinct p from " + name + " p, p.positions.values v where p.status = 'inactive' or v.pid > 0", // numberOfEntries*2
  "select distinct * from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries*2
  "select * from " + name + ".values v where v.status = 'inactive'", // numberOfEntries
  "select v from " + name + " v where v in (select p from " + name + " p where p.ID > 0)", // 19
  "select v from " + name + " v where v.status in (select distinct p.status from " + name + " p where p.status = 'inactive')", // numberOfEntries
  "select * from " + name + " v where v.status = ELEMENT (select distinct p.status from " + name + " p where p.status = 'inactive')" // numberOfEntries
};
final int[] results = { 2, 3, numberOfEntries, 1, numberOfEntries, 4, 4, numberOfEntries, numberOfEntries, 19,
  numberOfEntries, numberOfEntries * 2, 9, numberOfEntries * 2, numberOfEntries * 2, numberOfEntries,
  numberOfEntries * 2, numberOfEntries * 2, numberOfEntries, 19, numberOfEntries, numberOfEntries };
// Start server1
final int port1 = (Integer) server1.invoke(new SerializableCallable("Create Server1") {
@Override
public Object call() throws Exception {
Region r1 = getCache().createRegionFactory(RegionShortcut.PARTITION).create(regionName);
for (int i = 0; i < numberOfEntries; i++) {
r1.put("key-" + i, new PortfolioPdx(i));
}
CacheServer server = getCache().addCacheServer();
int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
server.setPort(port);
server.start();
return port;
}
});
// Start server2
final int port2 = (Integer) server2.invoke(new SerializableCallable("Create Server2") {
@Override
public Object call() throws Exception {
Region r1 = getCache().createRegionFactory(RegionShortcut.PARTITION).create(regionName);
CacheServer server = getCache().addCacheServer();
int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
server.setPort(port);
server.start();
return port;
}
});
// client loads pdx objects on server
client.invoke(new SerializableCallable("Create client") {
@Override
public Object call() throws Exception {
ClientCacheFactory cf = new ClientCacheFactory();
cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
ClientCache cache = getClientCache(cf);
Region region = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);
for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
region.put("key-" + i, new PortfolioPdx(i));
}
QueryService qs = null;
SelectResults sr = null;
// Execute query remotely
try {
qs = cache.getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
PositionPdx pos = new PositionPdx("IBM", 100);
PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
for (int i = 0; i < queries.length; i++) {
try {
if (i == 0) {
sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
} else if (i == 1) {
sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
} else {
sr = (SelectResults) qs.newQuery(queries[i]).execute();
}
assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
for (Object result : sr) {
if (result instanceof Struct) {
Object[] r = ((Struct) result).getFieldValues();
for (int j = 0; j < r.length; j++) {
if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
}
}
} else if (result instanceof PdxInstance || result instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
}
}
} catch (Exception e) {
Assert.fail("Failed executing query " + queries[i], e);
}
}
return null;
}
});
// query locally on server1
server1.invoke(new SerializableCallable("query locally on server1") {
@Override
public Object call() throws Exception {
GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
QueryService qs = null;
SelectResults sr = null;
// Execute query locally
try {
qs = getCache().getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
PositionPdx pos = new PositionPdx("IBM", 100);
PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
for (int i = 0; i < queries.length; i++) {
try {
if (i == 0) {
sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
} else if (i == 1) {
sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
} else {
sr = (SelectResults) qs.newQuery(queries[i]).execute();
}
assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
for (Object result : sr) {
if (result instanceof Struct) {
Object[] r = ((Struct) result).getFieldValues();
for (int j = 0; j < r.length; j++) {
if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
}
}
} else if (result instanceof PdxInstance || result instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
}
}
} catch (Exception e) {
Assert.fail("Failed executing query " + queries[i], e);
}
}
return null;
}
});
// query locally on server2
server2.invoke(new SerializableCallable("query locally on server2") {
@Override
public Object call() throws Exception {
GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
QueryService qs = null;
SelectResults[][] sr = new SelectResults[queries.length][2];
// Execute query locally
try {
qs = getCache().getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
PositionPdx pos = new PositionPdx("IBM", 100);
PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
for (int i = 0; i < queries.length; i++) {
try {
if (i == 0) {
sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
} else if (i == 1) {
sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
} else {
sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute();
}
assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr[i][0].size() > 0);
assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr[i][0].size());
for (Object result : sr[i][0]) {
if (result instanceof Struct) {
Object[] r = ((Struct) result).getFieldValues();
for (int j = 0; j < r.length; j++) {
if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
}
}
} else if (result instanceof PdxInstance || result instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
}
}
} catch (Exception e) {
Assert.fail("Failed executing query " + queries[i], e);
}
}
// create index
qs.createIndex("statusIndex", "p.status", name + " p");
qs.createIndex("IDIndex", "ID", name);
qs.createIndex("pIdIndex", "pos.getPid()", name + " p, p.positions.values pos");
qs.createIndex("secIdIndex", "pos.secId", name + " p, p.positions.values pos");
for (int i = 0; i < queries.length; i++) {
try {
if (i == 0) {
sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
} else if (i == 1) {
sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
} else {
sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute();
}
assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr[i][1].size() > 0);
assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr[i][1].size());
for (Object result : sr[i][1]) {
if (result instanceof Struct) {
Object[] r = ((Struct) result).getFieldValues();
for (int j = 0; j < r.length; j++) {
if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
}
}
} else if (result instanceof PdxInstance || result instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
}
}
} catch (Exception e) {
Assert.fail("Failed executing query " + queries[i], e);
}
}
StructSetOrResultsSet ssOrrs = new StructSetOrResultsSet();
ssOrrs.CompareQueryResultsWithoutAndWithIndexes(sr, queries.length, queries);
return null;
}
});
this.closeClient(client);
this.closeClient(server1);
this.closeClient(server2);
}
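Every server VM in the test above wires up its CacheServer the same way: create a region, call addCacheServer(), set a port, and start(). The following standalone sketch distills that pattern using only the public API; it is not part of the test, the class name is made up, and passing 0 to setPort() (letting the server pick a free port, rather than using AvailablePortHelper as the test does) is an illustrative choice.

import java.io.IOException;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.server.CacheServer;

// Hypothetical helper, not from the Geode sources.
public class CacheServerStartSketch {
  public static int startServer(String regionName) throws IOException {
    Cache cache = new CacheFactory().create(); // embedded peer cache
    cache.createRegionFactory(RegionShortcut.PARTITION).create(regionName);
    CacheServer server = cache.addCacheServer(); // register a server endpoint for clients
    server.setPort(0); // 0 asks the server to choose an available port
    server.start(); // begin accepting client connections
    return server.getPort(); // the actual port clients should add to their pool
  }
}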
Use of org.apache.geode.cache.server.CacheServer in project geode by apache.
From class PdxLocalQueryVersionedClassDUnitTest, method testIsRemoteFlagForRemoteQueries.
/**
* Tests the isRemote flag, which can become inconsistent when bind queries are executed from
* multiple threads. Bug #49662 was caused by this inconsistency. A simplified, non-DUnit sketch
* of the same access pattern follows this test.
*
* @throws Exception
*/
@Test
public void testIsRemoteFlagForRemoteQueries() throws Exception {
final Host host = Host.getHost(0);
final VM server = host.getVM(0);
final VM client = host.getVM(1);
final int numberOfEntries = 1000;
final String name = "/" + regionName;
final String query = "select distinct * from " + name + " where id > $1 and id < $2 and status = 'active'";
// Start server
final int port1 = (Integer) server.invoke(new SerializableCallable("Create Server") {
@Override
public Object call() throws Exception {
Region r1 = getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
CacheServer server = getCache().addCacheServer();
int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
server.setPort(port);
server.start();
return port;
}
});
// Start client and put version1 objects on server
// Server does not have version1 classes in classpath
client.invoke(new SerializableCallable("Create client") {
@Override
public Object call() throws Exception {
ClientCacheFactory cf = new ClientCacheFactory();
cf.addPoolServer(NetworkUtils.getServerHostName(server.getHost()), port1);
ClientCache cache = getClientCache(cf);
Region region = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);
for (int i = 0; i < numberOfEntries; i++) {
PdxInstanceFactory pdxInstanceFactory = PdxInstanceFactoryImpl.newCreator("PdxVersionedNewPortfolio", false);
pdxInstanceFactory.writeInt("id", i);
pdxInstanceFactory.writeString("status", (i % 2 == 0 ? "active" : "inactive"));
PdxInstance pdxInstance = pdxInstanceFactory.create();
region.put("key-" + i, pdxInstance);
}
return null;
}
});
// Execute same query remotely from client using 2 threads
// Since this is a bind query, the query object will be shared
// between the 2 threads.
AsyncInvocation a1 = client.invokeAsync(new SerializableCallable("Query from client") {
@Override
public Object call() throws Exception {
QueryService qs = null;
SelectResults sr = null;
// Execute query remotely
try {
qs = getCache().getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
try {
for (int i = 0; i < 100; i++) {
sr = (SelectResults) qs.newQuery(query).execute(new Object[] { 1, 1000 });
}
Assert.assertTrue("Size of resultset should be greater than 0 for query: " + query, sr.size() > 0);
} catch (Exception e) {
Assert.fail("Failed executing query " + query, e);
}
return null;
}
});
AsyncInvocation a2 = client.invokeAsync(new SerializableCallable("Query from client") {
@Override
public Object call() throws Exception {
QueryService qs = null;
SelectResults sr = null;
// Execute query remotely
try {
qs = getCache().getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
try {
for (int i = 0; i < 100; i++) {
sr = (SelectResults) qs.newQuery(query).execute(new Object[] { 997, 1000 });
}
Assert.assertTrue("Size of resultset should be greater than 0 for query: " + query, sr.size() > 0);
} catch (Exception e) {
Assert.fail("Failed executing query " + query, e);
}
return null;
}
});
ThreadUtils.join(a1, 60 * 1000);
ThreadUtils.join(a2, 60 * 1000);
if (a1.exceptionOccurred()) {
Assert.fail("Failed query execution " + a1.getException().getMessage());
}
if (a2.exceptionOccurred()) {
Assert.fail("Failed query execution " + a2.getException());
}
this.closeClient(client);
this.closeClient(server);
}
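The javadoc above is about one QueryService being hit by two threads that execute the same bind query with different parameters. Below is a minimal, non-DUnit sketch of that access pattern; it assumes an already-connected ClientCache with a populated /Portfolios region, and both the region name and the class name are illustrative, not taken from the test.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;

// Hypothetical sketch of concurrent bind-query execution.
public class ConcurrentBindQuerySketch {
  static void run(ClientCache cache) throws Exception {
    QueryService qs = cache.getQueryService();
    String queryString =
        "select distinct * from /Portfolios where id > $1 and id < $2 and status = 'active'";
    ExecutorService pool = Executors.newFixedThreadPool(2);
    // Each task executes the same query string with its own bind parameters.
    Callable<Object> low = () -> qs.newQuery(queryString).execute(new Object[] { 1, 500 });
    Callable<Object> high = () -> qs.newQuery(queryString).execute(new Object[] { 500, 1000 });
    Future<Object> f1 = pool.submit(low);
    Future<Object> f2 = pool.submit(high);
    SelectResults<?> r1 = (SelectResults<?>) f1.get();
    SelectResults<?> r2 = (SelectResults<?>) f2.get();
    pool.shutdown();
    System.out.println("low range: " + r1.size() + ", high range: " + r2.size());
  }
}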
Use of org.apache.geode.cache.server.CacheServer in project geode by apache.
From class PDXQueryTestBase, method stopBridgeServer.
/**
* Stops the bridge server that serves up the given cache.
*/
protected void stopBridgeServer(Cache cache) {
CacheServer bridge = (CacheServer) cache.getCacheServers().iterator().next();
bridge.stop();
assertFalse(bridge.isRunning());
}
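A cache can have more than one CacheServer attached; here is a small, hypothetical variant of the helper above that stops all of them rather than only the first, using the same public API (class and method names are illustrative).

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.server.CacheServer;

// Hypothetical variant, not part of PDXQueryTestBase.
public class CacheServerStopSketch {
  static void stopAllCacheServers(Cache cache) {
    for (CacheServer server : cache.getCacheServers()) {
      if (server.isRunning()) {
        server.stop(); // stop each server that is currently accepting client connections
      }
    }
  }
}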
Use of org.apache.geode.cache.server.CacheServer in project geode by apache.
From class PDXQueryTestBase, method startCacheServer.
/**
* Starts a bridge server on the given port, using the given notifyBySubscription setting to
* serve up the given region.
*/
protected void startCacheServer(int port, boolean notifyBySubscription) throws IOException {
Cache cache = CacheFactory.getAnyInstance();
CacheServer bridge = cache.addCacheServer();
bridge.setPort(port);
bridge.setNotifyBySubscription(notifyBySubscription);
bridge.start();
bridgeServerPort = bridge.getPort();
}
Use of org.apache.geode.cache.server.CacheServer in project geode by apache.
From class MemoryThresholdsDUnitTest, method startCacheServer.
/**
* Starts up a CacheServer.
*
* @return a {@link ServerPorts} containing the CacheServer ports.
*/
private ServerPorts startCacheServer(VM server, final float evictionThreshold, final float criticalThreshold, final String regionName, final boolean createPR, final boolean notifyBySubscription, final int prRedundancy) throws Exception {
return (ServerPorts) server.invoke(new SerializableCallable() {
public Object call() throws Exception {
getSystem(getServerProperties());
GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
InternalResourceManager irm = cache.getInternalResourceManager();
HeapMemoryMonitor hmm = irm.getHeapMonitor();
hmm.setTestMaxMemoryBytes(1000);
HeapMemoryMonitor.setTestBytesUsedForThresholdSet(500);
irm.setEvictionHeapPercentage(evictionThreshold);
irm.setCriticalHeapPercentage(criticalThreshold);
AttributesFactory factory = new AttributesFactory();
if (createPR) {
PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(prRedundancy);
paf.setTotalNumBuckets(11);
factory.setPartitionAttributes(paf.create());
} else {
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setDataPolicy(DataPolicy.REPLICATE);
}
Region region = createRegion(regionName, factory.create());
if (createPR) {
assertTrue(region instanceof PartitionedRegion);
} else {
assertTrue(region instanceof DistributedRegion);
}
CacheServer cacheServer = getCache().addCacheServer();
int port = AvailablePortHelper.getRandomAvailableTCPPorts(1)[0];
cacheServer.setPort(port);
cacheServer.setNotifyBySubscription(notifyBySubscription);
cacheServer.start();
return new ServerPorts(port);
}
});
}
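The test above reaches into internal classes (GemFireCacheImpl, InternalResourceManager, HeapMemoryMonitor) so it can fake heap usage for threshold testing; outside of tests, the same eviction and critical thresholds are configured through the public ResourceManager API. A minimal sketch follows, with threshold values chosen purely for illustration.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.control.ResourceManager;

// Hypothetical sketch of configuring heap thresholds via the public API.
public class HeapThresholdSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    ResourceManager rm = cache.getResourceManager();
    rm.setEvictionHeapPercentage(80.0f); // eviction-enabled regions start evicting above 80% heap use
    rm.setCriticalHeapPercentage(90.0f); // above 90%, puts on monitored regions fail with LowMemoryException
    cache.close();
  }
}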