Example usage of org.apache.geode.cache.client.ClientCacheFactory in the Apache Geode project, from the class ClientServerInvalidAndDestroyedEntryDUnitTest, method doTestRegisterInterestRemovesOldEntry:
/**
 * Verifies that registering interest removes client entries that were destroyed on the
 * servers while the client was not subscribed: after the servers locally destroy the key
 * and the client simulates interest recovery, the stale entry must be gone from the
 * client region.
 *
 * @param regionName name of the region created on the servers and the client
 * @param usePR whether the servers host the region as a PartitionedRegion
 */
private void doTestRegisterInterestRemovesOldEntry(final String regionName, final boolean usePR) throws Exception {
  VM vm1 = Host.getHost(0).getVM(1);
  VM vm2 = Host.getHost(0).getVM(2);
  // here are the keys that will be used to validate behavior. Keys must be
  // colocated if using both a partitioned region in the server and transactions
  // in the client. All of these keys hash to bucket 0 in a two-bucket PR
  // except Object11 and IDoNotExist1
  final String key10 = "Object10";
  final String interestPattern = "Object.*";
  SerializableCallableIF createServer = getCreateServerCallable(regionName, usePR);
  int serverPort = (Integer) vm1.invoke(createServer);
  vm2.invoke(createServer);
  vm1.invoke(new SerializableRunnable("populate server") {
    public void run() {
      Region myRegion = getCache().getRegion(regionName);
      for (int i = 1; i <= 20; i++) {
        myRegion.put("Object" + i, "Value" + i);
      }
    }
  });
  LogWriterUtils.getLogWriter().info("creating client cache");
  ClientCache c = new ClientCacheFactory().addPoolServer("localhost", serverPort)
      .set(LOG_LEVEL, LogWriterUtils.getDUnitLogLevel())
      .setPoolSubscriptionEnabled(true).create();
  Region myRegion = c.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);
  myRegion.registerInterestRegex(interestPattern);
  // make sure key10 is in the client because we're going to mess with it
  assertNotNull(myRegion.get(key10));
  // remove the entry for key10 on the servers and then simulate interest recovery
  // to show that the entry for key10 is no longer there in the client when recovery
  // finishes
  SerializableRunnable destroyKey10 =
      new SerializableRunnable("locally destroy " + key10 + " in the servers") {
        public void run() {
          Region myRegion = getCache().getRegion(regionName);
          EntryEventImpl event = ((LocalRegion) myRegion).generateEvictDestroyEvent(key10);
          event.setOperation(Operation.LOCAL_DESTROY);
          if (usePR) {
            BucketRegion bucket = ((PartitionedRegion) myRegion).getBucketRegion(key10);
            if (bucket != null) {
              event.setRegion(bucket);
              LogWriterUtils.getLogWriter().info("performing local destroy in " + bucket
                  + " ccEnabled=" + bucket.concurrencyChecksEnabled
                  + " rvv=" + bucket.getVersionVector());
              // turn off concurrency checks so the entry is physically removed
              // instead of being retained as a destroyed (tombstone) entry
              bucket.concurrencyChecksEnabled = false;
              bucket.mapDestroy(event, false, false, null);
              bucket.concurrencyChecksEnabled = true;
            }
          } else {
            // turn off concurrency checks so the entry is physically removed
            // instead of being retained as a destroyed (tombstone) entry
            ((LocalRegion) myRegion).concurrencyChecksEnabled = false;
            ((LocalRegion) myRegion).mapDestroy(event, false, false, null);
            ((LocalRegion) myRegion).concurrencyChecksEnabled = true;
          }
        }
      };
  vm1.invoke(destroyKey10);
  vm2.invoke(destroyKey10);
  myRegion.getCache().getLogger().info("clearing keys of interest");
  ((LocalRegion) myRegion).clearKeysOfInterest(interestPattern, InterestType.REGULAR_EXPRESSION,
      InterestResultPolicy.KEYS_VALUES);
  myRegion.getCache().getLogger().info("done clearing keys of interest");
  assertTrue("expected region to be empty but it has " + myRegion.size() + " entries",
      myRegion.size() == 0);
  RegionEntry entry;
  entry = ((LocalRegion) myRegion).getRegionEntry(key10);
  // it should have been removed
  assertNull(entry);
  // now register interest. At the end, finishRegisterInterest should clear
  // out the entry for key10 because it was stored in image-state as a
  // destroyed RI entry in clearKeysOfInterest
  myRegion.registerInterestRegex(interestPattern);
  entry = ((LocalRegion) myRegion).getRegionEntry(key10);
  // it should not be there
  assertNull(entry);
}
Example usage of org.apache.geode.cache.client.ClientCacheFactory in the Apache Geode project, from the class AcceptorImplDUnitTest, method testAcceptorImplCloseCleansUpWithHangingConnection:
/**
 * GEODE-2324. There was a bug where, due to an uncaught exception, `AcceptorImpl.close()` was
 * short-circuiting and failing to clean up properly.
 *
 * What this test does is start a Cache and hook the Acceptor to interrupt the thread before the
 * place where an InterruptedException could be thrown. It interrupts the thread, and checks that
 * the thread has terminated normally without short-circuiting. It doesn't check that every part
 * of the AcceptorImpl has shut down properly -- that seems both difficult to check (especially
 * since the fields are private) and implementation-dependent.
 */
@Test
public void testAcceptorImplCloseCleansUpWithHangingConnection() throws Exception {
final String hostname = Host.getHost(0).getHostName();
final VM clientVM = Host.getHost(0).getVM(0);
Properties props = new Properties();
props.setProperty(MCAST_PORT, "0");
// try-with-resources guarantees the server cache is closed even if an assertion fails
try (InternalCache cache = (InternalCache) new CacheFactory(props).create()) {
RegionFactory<Object, Object> regionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
// SleepyCacheWriter blocks the server-side put so we have a "hanging" connection to interrupt
SleepyCacheWriter<Object, Object> sleepyCacheWriter = new SleepyCacheWriter<>();
regionFactory.setCacheWriter(sleepyCacheWriter);
final CacheServer server = cache.addCacheServer();
final int port = AvailablePortHelper.getRandomAvailableTCPPort();
server.setPort(port);
server.start();
regionFactory.create("region1");
assertTrue(cache.isServer());
assertFalse(cache.isClosed());
Awaitility.await("Acceptor is up and running").atMost(10, SECONDS).until(() -> getAcceptorImplFromCache(cache) != null);
AcceptorImpl acceptorImpl = getAcceptorImplFromCache(cache);
// the client put runs asynchronously: it will block in SleepyCacheWriter on the server
clientVM.invokeAsync(() -> {
// System.setProperty("gemfire.PoolImpl.TRY_SERVERS_ONCE", "true");
ClientCacheFactory clientCacheFactory = new ClientCacheFactory();
clientCacheFactory.addPoolServer(hostname, port);
// short timeouts so the hung operation gives up quickly instead of stalling the test
clientCacheFactory.setPoolReadTimeout(5000);
clientCacheFactory.setPoolRetryAttempts(1);
clientCacheFactory.setPoolMaxConnections(1);
clientCacheFactory.setPoolFreeConnectionTimeout(1000);
ClientCache clientCache = clientCacheFactory.create();
Region<Object, Object> clientRegion1 = clientCache.createClientRegionFactory(ClientRegionShortcut.PROXY).create("region1");
clientRegion1.put("foo", "bar");
});
// wait until the server-side put is actually blocked before closing the cache,
// so close() runs while a connection is genuinely hanging
Awaitility.await("Cache writer starts").atMost(10, SECONDS).until(sleepyCacheWriter::isStarted);
cache.close();
Awaitility.await("Cache writer interrupted").atMost(10, SECONDS).until(sleepyCacheWriter::isInterrupted);
sleepyCacheWriter.stopWaiting();
// the core assertion for GEODE-2324: close() must not short-circuit
Awaitility.await("Acceptor shuts down properly").atMost(10, SECONDS).until(() -> acceptorImpl.isShutdownProperly());
// for debugging.
ThreadUtils.dumpMyThreads();
regionFactory.setCacheWriter(null);
}
}
Example usage of org.apache.geode.cache.client.ClientCacheFactory in the Apache Geode project, from the class RegionCreateDestroyDUnitTest, method testCreateDestroyReservedRegion:
/**
 * Verifies that a region whose name begins with a double underscore (a reserved prefix)
 * cannot be created, either directly on the server or remotely from a client.
 */
// GEODE-1878
@Category(FlakyTest.class)
@Test
public void testCreateDestroyReservedRegion() throws InterruptedException {
  Cache serverCache = getCache();
  try {
    serverCache.createRegionFactory(RegionShortcut.REPLICATE).create(RESERVED_REGION_NAME);
    fail("Should have thrown an IllegalArgumentException");
  } catch (IllegalArgumentException arg) {
    assertEquals("Region names may not begin with a double-underscore: __ReservedRegion",
        arg.getMessage());
  }
  try {
    startServer(serverCache);
  } catch (IOException e) {
    // preserve the original exception and stack trace instead of only its message
    throw new AssertionError("unable to start server", e);
  }
  try {
    client1.invoke(() -> {
      ClientCache cache = new ClientCacheFactory(createClientProperties())
          .setPoolSubscriptionEnabled(true).addPoolServer("localhost", serverPort).create();
      try {
        cache.createClientRegionFactory(ClientRegionShortcut.PROXY).create(RESERVED_REGION_NAME);
        fail("Should have thrown an IllegalArgumentException");
      } catch (IllegalArgumentException e) {
        assertEquals("Region names may not begin with a double-underscore: __ReservedRegion",
            e.getMessage());
      }
    });
  } catch (RMIException rmi) {
    // previously the cause was fetched and silently discarded, which would let an
    // assertion failure in the client VM pass the test; surface it instead
    throw new AssertionError("client-side reserved-region check failed", rmi.getCause());
  }
}
Example usage of org.apache.geode.cache.client.ClientCacheFactory in the Apache Geode project, from the class RegionCreateDestroyDUnitTest, method testCreateDestroyValidRegion:
/**
 * Verifies that a client can create a proxy region for a validly-named server region
 * and then destroy it.
 */
// GEODE-1922
@Category(FlakyTest.class)
@Test
public void testCreateDestroyValidRegion() throws InterruptedException {
  Cache serverCache = getCache();
  serverCache.createRegionFactory(RegionShortcut.REPLICATE).create(GOOD_REGION_NAME);
  try {
    startServer(serverCache);
  } catch (IOException e) {
    // preserve the original exception and stack trace instead of only its message
    throw new AssertionError("unable to start server", e);
  }
  client1.invoke(() -> {
    ClientCache cache = new ClientCacheFactory(createClientProperties())
        .setPoolSubscriptionEnabled(true).addPoolServer("localhost", serverPort).create();
    Region region =
        cache.createClientRegionFactory(ClientRegionShortcut.PROXY).create(GOOD_REGION_NAME);
    region.destroyRegion();
    assertThat(region.isDestroyed()).isTrue();
  });
}
Example usage of org.apache.geode.cache.client.ClientCacheFactory in the Apache Geode project, from the class PdxQueryDUnitTest, method testPdxInstanceNoFieldNoMethod:
/**
 * Test to query a field that is not present in the Pdx object. Also the implicit method is
 * absent in the class.
 *
 * The stored PdxInstances have "id" and "status" fields but no "pdxStatus" field, so
 * querying "pdxStatus" must yield no matches (query 0) or Undefined projections (query 1).
 *
 * @throws CacheException
 */
@Test
public void testPdxInstanceNoFieldNoMethod() throws CacheException {
  final Host host = Host.getHost(0);
  final VM vm0 = host.getVM(0);
  final VM vm3 = host.getVM(3);
  final int numberOfEntries = 10;
  final String name = "/" + regionName;
  final String[] qs = { "select * from " + name + " where pdxStatus = 'active'",
      "select pdxStatus from " + name + " where id > 4" };
  // Start server1
  final int port1 = (Integer) vm0.invoke(new SerializableCallable("Create Server1") {
    @Override
    public Object call() throws Exception {
      // the created region is looked up by name later; the reference itself is not needed
      getCache().createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
      CacheServer server = getCache().addCacheServer();
      int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
      server.setPort(port);
      server.start();
      return port;
    }
  });
  // create client and load only version 1 objects with no pdxStatus field
  vm3.invoke(new SerializableCallable("Create client") {
    @Override
    public Object call() throws Exception {
      ClientCacheFactory cf = new ClientCacheFactory();
      cf.addPoolServer(NetworkUtils.getServerHostName(vm0.getHost()), port1);
      ClientCache cache = getClientCache(cf);
      Region region = cache.createClientRegionFactory(ClientRegionShortcut.PROXY).create(regionName);
      // Load version 1 objects
      for (int i = 0; i < numberOfEntries; i++) {
        PdxInstanceFactory pdxInstanceFactory =
            PdxInstanceFactoryImpl.newCreator("PdxVersionedNewPortfolio", false);
        pdxInstanceFactory.writeInt("id", i);
        pdxInstanceFactory.writeString("status", (i % 2 == 0 ? "active" : "inactive"));
        PdxInstance pdxInstance = pdxInstanceFactory.create();
        region.put("key-" + i, pdxInstance);
      }
      return null;
    }
  });
  // Version1 class loader
  vm3.invoke(new SerializableCallable("Create client") {
    @Override
    public Object call() throws Exception {
      // Load version 1 classloader
      QueryService remoteQueryService = null;
      // Execute query remotely
      try {
        remoteQueryService = getCache().getQueryService();
      } catch (Exception e) {
        Assert.fail("Failed to get QueryService.", e);
      }
      for (int i = 0; i < qs.length; i++) {
        try {
          SelectResults sr = (SelectResults) remoteQueryService.newQuery(qs[i]).execute();
          if (i == 1) {
            // projecting a missing field yields one Undefined per matching entry (ids 5..9)
            assertEquals(5, sr.size());
            for (Object o : sr) {
              if (!(o instanceof Undefined)) {
                fail("Result should be Undefined and not " + o.getClass());
              }
            }
          } else {
            // a where-clause on a missing field matches nothing
            assertEquals(0, sr.size());
          }
        } catch (Exception e) {
          Assert.fail("Failed executing " + qs[i], e);
        }
      }
      return null;
    }
  });
  Invoke.invokeInEveryVM(DistributedTestCase.class, "disconnectFromDS");
}
Aggregations