Example use of org.apache.geode.cache.client.ClientCache in the Apache Geode project, from class ClientExecuteFunctionAuthDUnitTest, method testExecuteRegionFunctionWithClientRegistration:
@Test
public void testExecuteRegionFunctionWithClientRegistration() {
  // Register the function locally so its id resolves at execution time.
  FunctionService.registerFunction(function);

  // dataReader lacks the DATA:WRITE permission the function execution needs,
  // so the server must reject the call.
  client1.invoke("logging in with dataReader", () -> {
    ClientCache clientCache = createClientCache("dataReader", "1234567", server.getPort());
    FunctionService.registerFunction(function);
    assertNotAuthorized(
        () -> FunctionService.onServer(clientCache.getDefaultPool())
            .setArguments(Boolean.TRUE)
            .execute(function.getId()),
        "DATA:WRITE");
  });

  // super-user is authorized, so the same execution completes and returns a result.
  client2.invoke("logging in with super-user", () -> {
    ClientCache clientCache = createClientCache("super-user", "1234567", server.getPort());
    FunctionService.registerFunction(function);
    ResultCollector resultCollector = FunctionService.onServer(clientCache.getDefaultPool())
        .setArguments(Boolean.TRUE)
        .execute(function.getId());
    resultCollector.getResult();
  });
}
Example use of org.apache.geode.cache.client.ClientCache in the Apache Geode project, from class QueryMonitorDUnitTest, method executeQueriesFromClient:
/**
 * Sets the test-only max query execution time on the client cache, then runs the
 * standard query batch through the pool's remote QueryService. Any exception is
 * logged at fine level and swallowed; assertions happen elsewhere in the test.
 *
 * @param timeout query timeout in milliseconds to install on the client cache
 */
private void executeQueriesFromClient(int timeout) {
  try {
    ClientCache clientCache = ClientCacheFactory.getAnyInstance();
    // testMaxQueryExecutionTime is a test hook on the cache implementation.
    ((GemFireCacheImpl) clientCache).testMaxQueryExecutionTime = timeout;
    QueryService remoteQueryService = PoolManager.find(poolName).getQueryService();
    executeQueriesAgainstQueryService(remoteQueryService);
  } catch (Exception ex) {
    GemFireCacheImpl.getInstance().getLogger().fine("Exception creating the query service", ex);
  }
}
Example use of org.apache.geode.cache.client.ClientCache in the Apache Geode project, from class ClientServerInvalidAndDestroyedEntryDUnitTest, method doTestRegisterInterestRemovesOldEntry:
/**
 * Verifies that re-registering interest removes a client entry whose server-side
 * counterpart was destroyed: the test populates two servers, locally destroys
 * {@code Object10} on both (with concurrency checks disabled so the entry is
 * physically removed), then clears keys of interest and re-registers interest
 * from the client, asserting the stale entry is gone.
 *
 * @param regionName name of the region created on both servers and the client
 * @param usePR whether the server regions are partitioned
 */
private void doTestRegisterInterestRemovesOldEntry(final String regionName, final boolean usePR) throws Exception {
  VM vm1 = Host.getHost(0).getVM(1);
  VM vm2 = Host.getHost(0).getVM(2);
  // here are the keys that will be used to validate behavior. Keys must be
  // colocated if using both a partitioned region in the server and transactions
  // in the client. All of these keys hash to bucket 0 in a two-bucket PR
  // except Object11 and IDoNotExist1
  final String key10 = "Object10";
  final String interestPattern = "Object.*";
  SerializableCallableIF createServer = getCreateServerCallable(regionName, usePR);
  int serverPort = (Integer) vm1.invoke(createServer);
  vm2.invoke(createServer);
  vm1.invoke(new SerializableRunnable("populate server") {
    public void run() {
      Region myRegion = getCache().getRegion(regionName);
      for (int i = 1; i <= 20; i++) {
        myRegion.put("Object" + i, "Value" + i);
      }
    }
  });
  org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("creating client cache");
  ClientCache c = new ClientCacheFactory().addPoolServer("localhost", serverPort)
      .set(LOG_LEVEL, LogWriterUtils.getDUnitLogLevel())
      .setPoolSubscriptionEnabled(true).create();
  Region myRegion =
      c.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);
  myRegion.registerInterestRegex(interestPattern);
  // make sure key10 is in the client because we're going to mess with it
  assertNotNull(myRegion.get(key10));
  // remove the entry for key10 on the servers and then simulate interest recovery
  // to show that the entry for key10 is no longer there in the client when recovery
  // finishes
  SerializableRunnable destroyKey10 =
      new SerializableRunnable("locally destroy " + key10 + " in the servers") {
        public void run() {
          Region myRegion = getCache().getRegion(regionName);
          EntryEventImpl event = ((LocalRegion) myRegion).generateEvictDestroyEvent(key10);
          event.setOperation(Operation.LOCAL_DESTROY);
          if (usePR) {
            BucketRegion bucket = ((PartitionedRegion) myRegion).getBucketRegion(key10);
            if (bucket != null) {
              event.setRegion(bucket);
              org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("performing local destroy in " + bucket + " ccEnabled=" + bucket.concurrencyChecksEnabled + " rvv=" + bucket.getVersionVector());
              // turn off concurrency checks so the entry is physically removed
              bucket.concurrencyChecksEnabled = false;
              bucket.mapDestroy(event, false, false, null);
              bucket.concurrencyChecksEnabled = true;
            }
          } else {
            // turn off concurrency checks so the entry is physically removed
            ((LocalRegion) myRegion).concurrencyChecksEnabled = false;
            ((LocalRegion) myRegion).mapDestroy(event, false, false, null);
            ((LocalRegion) myRegion).concurrencyChecksEnabled = true;
          }
        }
      };
  vm1.invoke(destroyKey10);
  vm2.invoke(destroyKey10);
  myRegion.getCache().getLogger().info("clearing keys of interest");
  ((LocalRegion) myRegion).clearKeysOfInterest(interestPattern,
      InterestType.REGULAR_EXPRESSION, InterestResultPolicy.KEYS_VALUES);
  myRegion.getCache().getLogger().info("done clearing keys of interest");
  assertTrue("expected region to be empty but it has " + myRegion.size() + " entries",
      myRegion.size() == 0);
  RegionEntry entry = ((LocalRegion) myRegion).getRegionEntry(key10);
  // it should have been removed
  assertNull(entry);
  // now register interest. At the end, finishRegisterInterest should clear
  // out the entry for key10 because it was stored in image-state as a
  // destroyed RI entry in clearKeysOfInterest
  myRegion.registerInterestRegex(interestPattern);
  entry = ((LocalRegion) myRegion).getRegionEntry(key10);
  // it should not be there
  assertNull(entry);
}
Example use of org.apache.geode.cache.client.ClientCache in the Apache Geode project, from class AcceptorImplDUnitTest, method testAcceptorImplCloseCleansUpWithHangingConnection:
/**
 * GEODE-2324. There was a bug where, due to an uncaught exception, `AcceptorImpl.close()` was
 * short-circuiting and failing to clean up properly.
 *
 * What this test does is start a Cache and hook the Acceptor to interrupt the thread before the
 * place where an InterruptedException could be thrown. It interrupts the thread, and checks that
 * the thread has terminated normally without short-circuiting. It doesn't check that every part
 * of the AcceptorImpl has shut down properly -- that seems both difficult to check (especially
 * since the fields are private) and implementation-dependent.
 */
@Test
public void testAcceptorImplCloseCleansUpWithHangingConnection() throws Exception {
final String hostname = Host.getHost(0).getHostName();
final VM clientVM = Host.getHost(0).getVM(0);
Properties props = new Properties();
props.setProperty(MCAST_PORT, "0");
// try-with-resources guarantees the cache is closed even if an assertion fails.
try (InternalCache cache = (InternalCache) new CacheFactory(props).create()) {
RegionFactory<Object, Object> regionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
// SleepyCacheWriter blocks the server-side put until stopWaiting() is called,
// giving us a hanging connection to close the acceptor against.
SleepyCacheWriter<Object, Object> sleepyCacheWriter = new SleepyCacheWriter<>();
regionFactory.setCacheWriter(sleepyCacheWriter);
final CacheServer server = cache.addCacheServer();
final int port = AvailablePortHelper.getRandomAvailableTCPPort();
server.setPort(port);
server.start();
regionFactory.create("region1");
assertTrue(cache.isServer());
assertFalse(cache.isClosed());
// The acceptor comes up asynchronously after server.start(); wait for it.
Awaitility.await("Acceptor is up and running").atMost(10, SECONDS).until(() -> getAcceptorImplFromCache(cache) != null);
AcceptorImpl acceptorImpl = getAcceptorImplFromCache(cache);
// Async client: the put will hang server-side in SleepyCacheWriter. Short pool
// timeouts/retries keep the client from blocking the test past the awaits below.
clientVM.invokeAsync(() -> {
// System.setProperty("gemfire.PoolImpl.TRY_SERVERS_ONCE", "true");
ClientCacheFactory clientCacheFactory = new ClientCacheFactory();
clientCacheFactory.addPoolServer(hostname, port);
clientCacheFactory.setPoolReadTimeout(5000);
clientCacheFactory.setPoolRetryAttempts(1);
clientCacheFactory.setPoolMaxConnections(1);
clientCacheFactory.setPoolFreeConnectionTimeout(1000);
ClientCache clientCache = clientCacheFactory.create();
Region<Object, Object> clientRegion1 = clientCache.createClientRegionFactory(ClientRegionShortcut.PROXY).create("region1");
clientRegion1.put("foo", "bar");
});
// Only close the cache once the writer is actually blocking a connection.
Awaitility.await("Cache writer starts").atMost(10, SECONDS).until(sleepyCacheWriter::isStarted);
cache.close();
// cache.close() should interrupt the hung writer thread rather than hang on it.
Awaitility.await("Cache writer interrupted").atMost(10, SECONDS).until(sleepyCacheWriter::isInterrupted);
sleepyCacheWriter.stopWaiting();
// The core assertion for GEODE-2324: close() completed its full cleanup path.
Awaitility.await("Acceptor shuts down properly").atMost(10, SECONDS).until(() -> acceptorImpl.isShutdownProperly());
// for debugging.
ThreadUtils.dumpMyThreads();
regionFactory.setCacheWriter(null);
}
}
Example use of org.apache.geode.cache.client.ClientCache in the Apache Geode project, from class GFSnapshotDUnitTest, method populateDataOnClient:
/**
 * Loads 100 TestObject entries (keys 0..99, owners "owner_&lt;i&gt;") into the
 * client's "TestRegion".
 *
 * @return always null (callable-style result placeholder)
 */
private Object populateDataOnClient() {
  ClientCache cache = ClientCacheFactory.getAnyInstance();
  Region testRegion = cache.getRegion("TestRegion");
  for (int key = 0; key < 100; key++) {
    testRegion.put(key, new TestObject(key, "owner_" + key));
  }
  return null;
}
Aggregations