Use of org.apache.geode.cache.client.ClientCacheFactory in project geode by apache.
From the class PdxLocalQueryDUnitTest, method testLocalPdxQueriesOnPR.
@Test
public void testLocalPdxQueriesOnPR() throws Exception {
final Host host = Host.getHost(0);
final VM server1 = host.getVM(0);
final VM server2 = host.getVM(1);
final VM client = host.getVM(2);
final int numberOfEntries = 10;
final String name = "/" + regionName;
final String[] queries = {
    "select * from " + name + " where position1 = $1", // 2
    "select * from " + name + " where aDay = $1", // 3
    "select distinct * from " + name + " p where p.status = 'inactive'", // numberOfEntries
    "select distinct p.status from " + name + " p where p.status = 'inactive'", // 1
    "select p from " + name + " p where p.status = 'inactive'", // numberOfEntries
    "select * from " + name + " p, p.positions.values v where v.secId = 'IBM'", // 4
    "select v from " + name + " p, p.positions.values v where v.secId = 'IBM'", // 4
    "select p.status from " + name + " p where p.status = 'inactive'", // numberOfEntries
    "select distinct * from " + name + " p where p.status = 'inactive' order by p.ID", // numberOfEntries
    "select * from " + name + " p where p.status = 'inactive' or p.ID > 0", // 19
    "select * from " + name + " p where p.status = 'inactive' and p.ID >= 0", // numberOfEntries
    "select * from " + name + " p where p.status in set ('inactive', 'active')", // numberOfEntries * 2
    "select * from " + name + " p where p.ID > 0 and p.ID < 10", // 9
    "select v from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries * 2
    "select v.secId from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries * 2
    "select distinct p from " + name + " p, p.positions.values v where p.status = 'inactive' and v.pid >= 0", // numberOfEntries
    "select distinct p from " + name + " p, p.positions.values v where p.status = 'inactive' or v.pid > 0", // numberOfEntries * 2
    "select distinct * from " + name + " p, p.positions.values v where p.status = 'inactive'", // numberOfEntries * 2
    "select * from " + name + ".values v where v.status = 'inactive'", // numberOfEntries
    "select v from " + name + " v where v in (select p from " + name + " p where p.ID > 0)", // 19
    "select v from " + name + " v where v.status in (select distinct p.status from " + name + " p where p.status = 'inactive')", // numberOfEntries
    "select * from " + name + " v where v.status = ELEMENT (select distinct p.status from " + name + " p where p.status = 'inactive')" // numberOfEntries
};
final int[] results = { 2, 3, numberOfEntries, 1, numberOfEntries, 4, 4, numberOfEntries, numberOfEntries, 19,
    numberOfEntries, numberOfEntries * 2, 9, numberOfEntries * 2, numberOfEntries * 2, numberOfEntries,
    numberOfEntries * 2, numberOfEntries * 2, numberOfEntries, 19, numberOfEntries, numberOfEntries };
// Start server1
final int port1 = (Integer) server1.invoke(new SerializableCallable("Create Server1") {
@Override
public Object call() throws Exception {
Region r1 = getCache().createRegionFactory(RegionShortcut.PARTITION).create(regionName);
for (int i = 0; i < numberOfEntries; i++) {
r1.put("key-" + i, new PortfolioPdx(i));
}
CacheServer server = getCache().addCacheServer();
int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
server.setPort(port);
server.start();
return port;
}
});
// Start server2
final int port2 = (Integer) server2.invoke(new SerializableCallable("Create Server2") {
@Override
public Object call() throws Exception {
Region r1 = getCache().createRegionFactory(RegionShortcut.PARTITION).create(regionName);
CacheServer server = getCache().addCacheServer();
int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
server.setPort(port);
server.start();
return port;
}
});
// client loads pdx objects on server
client.invoke(new SerializableCallable("Create client") {
@Override
public Object call() throws Exception {
ClientCacheFactory cf = new ClientCacheFactory();
cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
ClientCache cache = getClientCache(cf);
Region region = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);
for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
region.put("key-" + i, new PortfolioPdx(i));
}
QueryService qs = null;
SelectResults sr = null;
// Execute query remotely
try {
qs = cache.getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
PositionPdx pos = new PositionPdx("IBM", 100);
PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
for (int i = 0; i < queries.length; i++) {
try {
if (i == 0) {
sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
} else if (i == 1) {
sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
} else {
sr = (SelectResults) qs.newQuery(queries[i]).execute();
}
assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
for (Object result : sr) {
if (result instanceof Struct) {
Object[] r = ((Struct) result).getFieldValues();
for (int j = 0; j < r.length; j++) {
if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
}
}
} else if (result instanceof PdxInstance || result instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
}
}
} catch (Exception e) {
Assert.fail("Failed executing query " + queries[i], e);
}
}
return null;
}
});
// query locally on server1
server1.invoke(new SerializableCallable("query locally on server1") {
@Override
public Object call() throws Exception {
GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
QueryService qs = null;
SelectResults sr = null;
// Execute query locally
try {
qs = getCache().getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
PositionPdx pos = new PositionPdx("IBM", 100);
PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
for (int i = 0; i < queries.length; i++) {
try {
if (i == 0) {
sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
} else if (i == 1) {
sr = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
} else {
sr = (SelectResults) qs.newQuery(queries[i]).execute();
}
assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr.size() > 0);
assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr.size());
for (Object result : sr) {
if (result instanceof Struct) {
Object[] r = ((Struct) result).getFieldValues();
for (int j = 0; j < r.length; j++) {
if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
}
}
} else if (result instanceof PdxInstance || result instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
}
}
} catch (Exception e) {
Assert.fail("Failed executing query " + queries[i], e);
}
}
return null;
}
});
// query locally on server2
server2.invoke(new SerializableCallable("query locally on server2") {
@Override
public Object call() throws Exception {
GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
QueryService qs = null;
SelectResults[][] sr = new SelectResults[queries.length][2];
// Execute query locally
try {
qs = getCache().getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
PositionPdx pos = new PositionPdx("IBM", 100);
PortfolioPdx.Day pDay = new PortfolioPdx(1).aDay;
for (int i = 0; i < queries.length; i++) {
try {
if (i == 0) {
sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
} else if (i == 1) {
sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
} else {
sr[i][0] = (SelectResults) qs.newQuery(queries[i]).execute();
}
assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr[i][0].size() > 0);
assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr[i][0].size());
for (Object result : sr[i][0]) {
if (result instanceof Struct) {
Object[] r = ((Struct) result).getFieldValues();
for (int j = 0; j < r.length; j++) {
if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
}
}
} else if (result instanceof PdxInstance || result instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
}
}
} catch (Exception e) {
Assert.fail("Failed executing query " + queries[i], e);
}
}
// create index
qs.createIndex("statusIndex", "p.status", name + " p");
qs.createIndex("IDIndex", "ID", name);
qs.createIndex("pIdIndex", "pos.getPid()", name + " p, p.positions.values pos");
qs.createIndex("secIdIndex", "pos.secId", name + " p, p.positions.values pos");
for (int i = 0; i < queries.length; i++) {
try {
if (i == 0) {
sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pos });
} else if (i == 1) {
sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute(new Object[] { pDay });
} else {
sr[i][1] = (SelectResults) qs.newQuery(queries[i]).execute();
}
assertTrue("Size of resultset should be greater than 0 for query: " + queries[i], sr[i][1].size() > 0);
assertEquals("Expected and actual results do not match for query: " + queries[i], results[i], sr[i][1].size());
for (Object result : sr[i][1]) {
if (result instanceof Struct) {
Object[] r = ((Struct) result).getFieldValues();
for (int j = 0; j < r.length; j++) {
if (r[j] instanceof PdxInstance || r[j] instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + r[j].getClass() + " for query: " + queries[i]);
}
}
} else if (result instanceof PdxInstance || result instanceof PdxString) {
fail("Result object should be a domain object and not an instance of " + result.getClass() + " for query: " + queries[i]);
}
}
} catch (Exception e) {
Assert.fail("Failed executing query " + queries[i], e);
}
}
StructSetOrResultsSet ssOrrs = new StructSetOrResultsSet();
ssOrrs.CompareQueryResultsWithoutAndWithIndexes(sr, queries.length, queries);
return null;
}
});
this.closeClient(client);
this.closeClient(server1);
this.closeClient(server2);
}
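A minimal standalone sketch of the client-side pattern this test exercises: connect through a ClientCacheFactory pool, publish a PDX value, and run a bind-parameter OQL query. The host, port, the "portfolios" region name, and the "example.Portfolio" PDX type name are placeholders, and a server already hosting a matching region is assumed; this is not the DUnit test itself.

import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.pdx.PdxInstance;

public class PdxQueryClientSketch {
  public static void main(String[] args) throws Exception {
    // Connect to a running cache server; host and port are placeholders.
    ClientCache cache = new ClientCacheFactory()
        .addPoolServer("localhost", 40404)
        .create();

    ClientRegionFactory<String, Object> crf =
        cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY);
    Region<String, Object> region = crf.create("portfolios");

    // Build a PDX value without needing the domain class on the client.
    PdxInstance value = cache.createPdxInstanceFactory("example.Portfolio") // hypothetical type name
        .writeInt("ID", 1)
        .writeString("status", "active")
        .create();
    region.put("key-1", value);

    // Bind parameters are positional ($1, $2, ...), as in the test's first two queries.
    QueryService qs = cache.getQueryService();
    SelectResults<?> sr = (SelectResults<?>) qs
        .newQuery("select p.ID from /portfolios p where p.ID > $1")
        .execute(new Object[] { 0 });
    System.out.println("matched " + sr.size() + " entries");

    cache.close();
  }
}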
Use of org.apache.geode.cache.client.ClientCacheFactory in project geode by apache.
From the class PdxLocalQueryVersionedClassDUnitTest, method testIsRemoteFlagForRemoteQueries.
/**
 * Tests that the isRemote flag remains consistent when bind queries are executed from
 * multiple threads; bug #49662 was caused by this flag becoming inconsistent.
 */
@Test
public void testIsRemoteFlagForRemoteQueries() throws Exception {
final Host host = Host.getHost(0);
final VM server = host.getVM(0);
final VM client = host.getVM(1);
final int numberOfEntries = 1000;
final String name = "/" + regionName;
final String query = "select distinct * from " + name + " where id > $1 and id < $2 and status = 'active'";
// Start server
final int port1 = (Integer) server.invoke(new SerializableCallable("Create Server") {
@Override
public Object call() throws Exception {
Region r1 = getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
CacheServer server = getCache().addCacheServer();
int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
server.setPort(port);
server.start();
return port;
}
});
// Start client and put version1 objects on server
// Server does not have version1 classes in classpath
client.invoke(new SerializableCallable("Create client") {
@Override
public Object call() throws Exception {
ClientCacheFactory cf = new ClientCacheFactory();
cf.addPoolServer(NetworkUtils.getServerHostName(server.getHost()), port1);
ClientCache cache = getClientCache(cf);
Region region = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);
for (int i = 0; i < numberOfEntries; i++) {
PdxInstanceFactory pdxInstanceFactory = PdxInstanceFactoryImpl.newCreator("PdxVersionedNewPortfolio", false);
pdxInstanceFactory.writeInt("id", i);
pdxInstanceFactory.writeString("status", (i % 2 == 0 ? "active" : "inactive"));
PdxInstance pdxInstance = pdxInstanceFactory.create();
region.put("key-" + i, pdxInstance);
}
return null;
}
});
// Execute same query remotely from client using 2 threads
// Since this is a bind query, the query object will be shared
// between the 2 threads.
AsyncInvocation a1 = client.invokeAsync(new SerializableCallable("Query from client") {
@Override
public Object call() throws Exception {
QueryService qs = null;
SelectResults sr = null;
// Execute query remotely
try {
qs = getCache().getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
try {
for (int i = 0; i < 100; i++) {
sr = (SelectResults) qs.newQuery(query).execute(new Object[] { 1, 1000 });
}
Assert.assertTrue("Size of resultset should be greater than 0 for query: " + query, sr.size() > 0);
} catch (Exception e) {
Assert.fail("Failed executing query " + query, e);
}
return null;
}
});
AsyncInvocation a2 = client.invokeAsync(new SerializableCallable("Query from client") {
@Override
public Object call() throws Exception {
QueryService qs = null;
SelectResults sr = null;
// Execute query remotely
try {
qs = getCache().getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
try {
for (int i = 0; i < 100; i++) {
sr = (SelectResults) qs.newQuery(query).execute(new Object[] { 997, 1000 });
}
Assert.assertTrue("Size of resultset should be greater than 0 for query: " + query, sr.size() > 0);
} catch (Exception e) {
Assert.fail("Failed executing query " + query, e);
}
return null;
}
});
ThreadUtils.join(a1, 60 * 1000);
ThreadUtils.join(a2, 60 * 1000);
if (a1.exceptionOccurred()) {
Assert.fail("Failed query execution", a1.getException());
}
if (a2.exceptionOccurred()) {
Assert.fail("Failed query execution", a2.getException());
}
this.closeClient(client);
this.closeClient(server);
}
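A minimal sketch of the same concurrent bind-query pattern outside the DUnit harness, assuming a server is already running and its /portfolios region holds entries with matching id and status fields; host, port, region name, and field names are placeholders, not values taken from the test.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;

public class ConcurrentBindQuerySketch {

  private static final String QUERY =
      "select distinct * from /portfolios where id > $1 and id < $2 and status = 'active'";

  public static void main(String[] args) throws Exception {
    ClientCache cache = new ClientCacheFactory()
        .addPoolServer("localhost", 40404) // placeholder server endpoint
        .create();
    QueryService qs = cache.getQueryService();

    // Two tasks repeatedly run the same bind query with different parameters,
    // mirroring the two AsyncInvocations in the test above.
    ExecutorService pool = Executors.newFixedThreadPool(2);
    pool.submit(() -> run(qs, 1, 1000));
    pool.submit(() -> run(qs, 997, 1000));
    pool.shutdown();
    pool.awaitTermination(60, TimeUnit.SECONDS);
    cache.close();
  }

  private static void run(QueryService qs, int low, int high) {
    try {
      for (int i = 0; i < 100; i++) {
        SelectResults<?> sr =
            (SelectResults<?>) qs.newQuery(QUERY).execute(new Object[] { low, high });
        if (sr.isEmpty()) {
          throw new IllegalStateException("expected results for bounds " + low + ".." + high);
        }
      }
    } catch (Exception e) {
      throw new RuntimeException("query failed for bounds " + low + ".." + high, e);
    }
  }
}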
Use of org.apache.geode.cache.client.ClientCacheFactory in project geode by apache.
From the class SSLNoClientAuthDUnitTest, method setUpClientVM.
public void setUpClientVM(String host, int port, boolean cacheServerSslenabled, boolean cacheServerSslRequireAuth, String keyStore, String trustStore) {
Properties gemFireProps = new Properties();
String cacheServerSslprotocols = "any";
String cacheServerSslciphers = "any";
String keyStorePath = TestUtil.getResourcePath(SSLNoClientAuthDUnitTest.class, keyStore);
String trustStorePath = TestUtil.getResourcePath(SSLNoClientAuthDUnitTest.class, trustStore);
// using new server-ssl-* properties
gemFireProps.put(SERVER_SSL_ENABLED, String.valueOf(cacheServerSslenabled));
gemFireProps.put(SERVER_SSL_PROTOCOLS, cacheServerSslprotocols);
gemFireProps.put(SERVER_SSL_CIPHERS, cacheServerSslciphers);
gemFireProps.put(SERVER_SSL_REQUIRE_AUTHENTICATION, String.valueOf(cacheServerSslRequireAuth));
gemFireProps.put(SERVER_SSL_KEYSTORE_TYPE, "jks");
gemFireProps.put(SERVER_SSL_KEYSTORE, keyStorePath);
gemFireProps.put(SERVER_SSL_KEYSTORE_PASSWORD, "password");
gemFireProps.put(SERVER_SSL_TRUSTSTORE, trustStorePath);
gemFireProps.put(SERVER_SSL_TRUSTSTORE_PASSWORD, "password");
StringWriter sw = new StringWriter();
PrintWriter writer = new PrintWriter(sw);
gemFireProps.list(writer);
System.out.println("Starting client ds with following properties \n" + sw.getBuffer());
ClientCacheFactory clientCacheFactory = new ClientCacheFactory(gemFireProps);
clientCacheFactory.addPoolServer(host, port);
clientCache = clientCacheFactory.create();
ClientRegionFactory<String, String> regionFactory = clientCache.createClientRegionFactory(ClientRegionShortcut.PROXY);
Region<String, String> region = regionFactory.create("serverRegion");
assertNotNull(region);
}
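For reference, a condensed standalone version of the same client-side SSL setup. The test intentionally uses the legacy server-ssl-* properties, so this sketch does too; the keystore and truststore paths, passwords, host, and port are placeholders, and an SSL-enabled cache server hosting a serverRegion is assumed.

import static org.apache.geode.distributed.ConfigurationProperties.SERVER_SSL_ENABLED;
import static org.apache.geode.distributed.ConfigurationProperties.SERVER_SSL_KEYSTORE;
import static org.apache.geode.distributed.ConfigurationProperties.SERVER_SSL_KEYSTORE_PASSWORD;
import static org.apache.geode.distributed.ConfigurationProperties.SERVER_SSL_REQUIRE_AUTHENTICATION;
import static org.apache.geode.distributed.ConfigurationProperties.SERVER_SSL_TRUSTSTORE;
import static org.apache.geode.distributed.ConfigurationProperties.SERVER_SSL_TRUSTSTORE_PASSWORD;

import java.util.Properties;

import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;

public class SslClientSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty(SERVER_SSL_ENABLED, "true");
    // "false" mirrors the no-client-auth scenario: the client presents no certificate.
    props.setProperty(SERVER_SSL_REQUIRE_AUTHENTICATION, "false");
    props.setProperty(SERVER_SSL_KEYSTORE, "/path/to/client.keystore");      // placeholder path
    props.setProperty(SERVER_SSL_KEYSTORE_PASSWORD, "password");
    props.setProperty(SERVER_SSL_TRUSTSTORE, "/path/to/client.truststore");  // placeholder path
    props.setProperty(SERVER_SSL_TRUSTSTORE_PASSWORD, "password");

    ClientCache cache = new ClientCacheFactory(props)
        .addPoolServer("localhost", 40404) // placeholder server endpoint
        .create();

    // PROXY regions keep no local state; all operations go to the server,
    // which must already define a region named "serverRegion".
    Region<String, String> region = cache
        .<String, String>createClientRegionFactory(ClientRegionShortcut.PROXY)
        .create("serverRegion");
    region.put("key", "value");
    cache.close();
  }
}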
Use of org.apache.geode.cache.client.ClientCacheFactory in project geode by apache.
From the class ClientServerCCEDUnitTest, method createDurableClientRegion.
// For durable client QRM testing we need a backup queue (redundancy=1) and
// durable attributes. We also need to invoke readyForEvents()
private void createDurableClientRegion(final VM vm, final String regionName, final int port1, final int port2, final boolean ccEnabled) {
SerializableCallable createRegion = new SerializableCallable() {
public Object call() throws Exception {
ClientCacheFactory cf = new ClientCacheFactory();
cf.addPoolServer(NetworkUtils.getServerHostName(vm.getHost()), port1);
cf.addPoolServer(NetworkUtils.getServerHostName(vm.getHost()), port2);
cf.setPoolSubscriptionEnabled(true);
cf.setPoolSubscriptionRedundancy(1);
// bug #50683 - secondary durable queue retains all GC messages
cf.set(DURABLE_CLIENT_ID, "" + vm.getPid());
cf.set(DURABLE_CLIENT_TIMEOUT, "" + 200);
cf.set(LOG_LEVEL, LogWriterUtils.getDUnitLogLevel());
ClientCache cache = getClientCache(cf);
ClientRegionFactory crf = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY);
crf.setConcurrencyChecksEnabled(ccEnabled);
TestRegion = (LocalRegion) crf.create(regionName);
TestRegion.registerInterestRegex(".*", InterestResultPolicy.KEYS_VALUES, true, true);
cache.readyForEvents();
return null;
}
};
vm.invoke(createRegion);
}
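A standalone sketch of the durable-client configuration this helper builds, with placeholder endpoints and a fixed durable id in place of the VM pid; a minimal sketch under those assumptions, not the DUnit helper itself.

import static org.apache.geode.distributed.ConfigurationProperties.DURABLE_CLIENT_ID;
import static org.apache.geode.distributed.ConfigurationProperties.DURABLE_CLIENT_TIMEOUT;

import org.apache.geode.cache.InterestResultPolicy;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;

public class DurableClientSketch {
  public static void main(String[] args) {
    ClientCache cache = new ClientCacheFactory()
        .addPoolServer("localhost", 40404)  // primary server (placeholder)
        .addPoolServer("localhost", 40405)  // secondary server (placeholder)
        .setPoolSubscriptionEnabled(true)   // required for server-to-client events
        .setPoolSubscriptionRedundancy(1)   // keep a backup subscription queue
        .set(DURABLE_CLIENT_ID, "durable-client-42")
        .set(DURABLE_CLIENT_TIMEOUT, "200") // seconds the servers keep the queue while disconnected
        .create();

    Region<Object, Object> region = cache
        .createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
        .create("events"); // placeholder region name; must exist on the servers

    // Durable interest (third argument true) is replayed after the client reconnects.
    region.registerInterestRegex(".*", InterestResultPolicy.KEYS_VALUES, true, true);

    // Tell the servers this durable client is ready to receive queued events.
    cache.readyForEvents();

    // On shutdown, keepalive=true asks the servers to retain the durable queue.
    cache.close(true);
  }
}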
Use of org.apache.geode.cache.client.ClientCacheFactory in project geode by apache.
From the class ClientServerCCEDUnitTest, method createClientRegion.
private void createClientRegion(final VM vm, final String regionName, final int port, final boolean ccEnabled, final ClientRegionShortcut clientRegionShortcut, final boolean registerInterest) {
SerializableCallable createRegion = new SerializableCallable() {
public Object call() throws Exception {
ClientCacheFactory cf = new ClientCacheFactory();
cf.addPoolServer(NetworkUtils.getServerHostName(vm.getHost()), port);
if (registerInterest) {
cf.setPoolSubscriptionEnabled(true);
}
cf.set(LOG_LEVEL, LogWriterUtils.getDUnitLogLevel());
ClientCache cache = getClientCache(cf);
ClientRegionFactory crf = cache.createClientRegionFactory(clientRegionShortcut);
crf.setConcurrencyChecksEnabled(ccEnabled);
crf.setStatisticsEnabled(true);
TestRegion = (LocalRegion) crf.create(regionName);
if (registerInterest) {
TestRegion.registerInterestRegex(".*", InterestResultPolicy.KEYS_VALUES, false, true);
}
return null;
}
};
vm.invoke(createRegion);
}
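And a non-durable counterpart to createClientRegion above: subscription enabled on the pool, concurrency checks and statistics set on the ClientRegionFactory, and a non-durable register-interest call. The endpoint and region name are placeholders, and a server hosting the region is assumed.

import org.apache.geode.cache.InterestResultPolicy;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;

public class SubscriptionClientSketch {
  public static void main(String[] args) {
    ClientCache cache = new ClientCacheFactory()
        .addPoolServer("localhost", 40404) // placeholder server endpoint
        .setPoolSubscriptionEnabled(true)  // needed before any registerInterest* call
        .create();

    ClientRegionFactory<String, String> crf =
        cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY);
    crf.setConcurrencyChecksEnabled(true); // version-vector consistency checks, as in the test
    crf.setStatisticsEnabled(true);        // per-entry statistics
    Region<String, String> region = crf.create("testRegion"); // placeholder region name

    // Non-durable interest in every key; KEYS_VALUES primes the local cache with current values.
    region.registerInterestRegex(".*", InterestResultPolicy.KEYS_VALUES, false, true);
    cache.close();
  }
}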