Example use of org.apache.geode.test.dunit.AsyncInvocation in the Apache Geode project:
the testConcOps method of the RedisDistDUnitTest class.
/**
 * Just make sure there are no unexpected server crashes
 */
// GEODE-1697
@Category(FlakyTest.class)
@Test
public void testConcOps() throws Exception {
  final int opsPerClient = 100;
  final String hashKey = TEST_KEY + "hash";
  final String listKey = TEST_KEY + "list";
  final String zsetKey = TEST_KEY + "zset";
  final String setKey = TEST_KEY + "set";

  // Client task that hammers one server with a random mix of
  // hash / list / sorted-set / set operations.
  class ConcOps extends ClientTestBase {

    protected ConcOps(int port) {
      super(port);
    }

    @Override
    public Object call() throws Exception {
      Jedis jedis = new Jedis(localHost, port, JEDIS_TIMEOUT);
      Random random = new Random();
      for (int op = 0; op < opsPerClient; op++) {
        switch (random.nextInt(4)) {
          case 0:
            // Hash operations.
            jedis.hset(hashKey, randString(), randString());
            jedis.hgetAll(hashKey);
            jedis.hvals(hashKey);
            break;
          case 1:
            // List operations.
            jedis.lpush(listKey, randString());
            jedis.rpush(listKey, randString());
            jedis.ltrim(listKey, 0, 100);
            jedis.lrange(listKey, 0, -1);
            break;
          case 2:
            // Sorted-set operations.
            jedis.zadd(zsetKey, random.nextDouble(), randString());
            jedis.zrangeByLex(zsetKey, "(a", "[z");
            jedis.zrangeByScoreWithScores(zsetKey, 0, 1, 0, 100);
            jedis.zremrangeByScore(zsetKey, random.nextDouble(), random.nextDouble());
            break;
          default:
            // Set operations.
            jedis.sadd(setKey, randString());
            jedis.smembers(setKey);
            jedis.sdiff(setKey, "afd");
            jedis.sunionstore("dst", setKey, "afds");
            break;
        }
      }
      return null;
    }
  }
  // Run against both servers concurrently; getResult() rethrows any
  // exception raised by the async client, failing the test.
  AsyncInvocation asyncOps = client1.invokeAsync(new ConcOps(server1Port));
  client2.invoke(new ConcOps(server2Port));
  asyncOps.getResult();
}
Example use of org.apache.geode.test.dunit.AsyncInvocation in the Apache Geode project:
the testEventsDuringQueryExecution method of the CqDataUsingPoolDUnitTest class.
/**
 * Test for events created during the CQ query execution. When CQs are executed using
 * executeWithInitialResults there may be possibility that the region changes during that time may
 * not be reflected in the query result set thus making the query data and region data
 * inconsistent.
 */
@Test
public void testEventsDuringQueryExecution() throws Exception {
  final Host host = Host.getHost(0);
  VM server = host.getVM(0);
  final VM client = host.getVM(1);
  final String cqName = "testEventsDuringQueryExecution_0";
  cqDUnitTest.createServer(server);
  final int port = server.invoke(() -> CqQueryUsingPoolDUnitTest.getCacheServerPort());
  final String host0 = NetworkUtils.getServerHostName(server.getHost());
  String poolName = "testEventsDuringQueryExecution";
  cqDUnitTest.createPool(client, poolName, host0, port);
  // create CQ.
  cqDUnitTest.createCQ(client, poolName, cqName, cqDUnitTest.cqs[0]);
  final int numObjects = 200;
  final int totalObjects = 500;
  // initialize Region with the first batch of entries before the CQ executes.
  server.invoke(new CacheSerializableRunnable("Update Region") {

    @Override
    public void run2() throws CacheException {
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      for (int i = 1; i <= numObjects; i++) {
        Portfolio p = new Portfolio(i);
        region.put("" + i, p);
      }
    }
  });
  // First set testhook in executeWithInitialResults so that queued events
  // are not drained before we verify their number.
  client.invoke(setTestHook());
  // Execute CQ while update is in progress.
  AsyncInvocation executeCq =
      client.invokeAsync(new CacheSerializableRunnable("Execute CQ AsyncInvoke") {

        @Override
        public void run2() throws CacheException {
          QueryService cqService = getCache().getQueryService();
          // Get CqQuery object.
          CqQuery cq1 = cqService.getCq(cqName);
          if (cq1 == null) {
            fail("Failed to get CQ " + cqName);
          }
          SelectResults cqResults = null;
          try {
            cqResults = cq1.executeWithInitialResults();
          } catch (Exception ex) {
            Assert.fail("CQ execution failed", ex);
          }
          // Check num of events received during executeWithInitialResults.
          final TestHook testHook = CqQueryImpl.testHook;
          Wait.waitForCriterion(new WaitCriterion() {

            @Override
            public boolean done() {
              return testHook.numQueuedEvents() > 0;
            }

            @Override
            public String description() {
              return "No queued events found.";
            }
          }, 3000, 5, true);
          getCache().getLogger().fine("Queued Events Size" + testHook.numQueuedEvents());
          // Make sure CQEvents are queued during execute with initial results.
          CqQueryTestListener cqListener =
              (CqQueryTestListener) cq1.getCqAttributes().getCqListener();
          // Wait for the last key to arrive.
          cqListener.waitForCreated("" + totalObjects);
          // Check if the events from CqListener are in order.
          int oldId = 0;
          for (Object cqEvent : cqListener.events.toArray()) {
            // Integer.parseInt avoids the deprecated new Integer(String) boxing constructor.
            int newId = Integer.parseInt(cqEvent.toString());
            if (oldId > newId) {
              fail("Queued events for CQ Listener during execution with "
                  + "Initial results is not in the order in which they are created.");
            }
            oldId = newId;
          }
          // Check if all the IDs are present as part of Select Results and CQ Events.
          HashSet ids = new HashSet(cqListener.events);
          for (Object o : cqResults.asList()) {
            Struct s = (Struct) o;
            ids.add(s.get("key"));
          }
          HashSet missingIds = new HashSet();
          String key = "";
          for (int i = 1; i <= totalObjects; i++) {
            key = "" + i;
            if (!(ids.contains(key))) {
              missingIds.add(key);
            }
          }
          if (!missingIds.isEmpty()) {
            fail("Missing Keys in either ResultSet or the Cq Event list. "
                + " Missing keys : [size : " + missingIds.size() + "]" + missingIds
                + " Ids in ResultSet and CQ Events :" + ids);
          }
        }
      });
  // Update the region while the CQ is executing. NOTE: this invoke is synchronous
  // (the old comment claimed it was async); it first releases the test hook latch
  // on the client so the CQ listener can proceed, then puts the remaining entries.
  server.invoke(new CacheSerializableRunnable("Update Region") {

    @Override
    public void run2() throws CacheException {
      Wait.pause(200);
      client.invoke(new CacheSerializableRunnable("Releasing the latch") {

        @Override
        public void run2() throws CacheException {
          // Now release the testhook so that CQListener can proceed.
          final TestHook testHook = CqQueryImpl.testHook;
          testHook.ready();
        }
      });
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      for (int i = numObjects + 1; i <= totalObjects; i++) {
        Portfolio p = new Portfolio(i);
        region.put("" + i, p);
      }
    }
  });
  // FIX: wait for the async CQ validation and rethrow any failure it produced.
  // Previously the AsyncInvocation was never joined or checked, so assertion
  // errors raised on the client thread were silently lost.
  executeCq.join();
  if (executeCq.exceptionOccurred()) {
    Assert.fail("Async CQ execution/validation failed", executeCq.getException());
  }
  // Close.
  cqDUnitTest.closeClient(client);
  cqDUnitTest.closeServer(server);
}
Example use of org.apache.geode.test.dunit.AsyncInvocation in the Apache Geode project:
the testMultipleExecuteWithInitialResults method of the CqDataDUnitTest class.
/**
 * This test was created to test executeWithInitialResults being called multiple times.
 * Previously, the queueEvents would be overwritten and we would lose data. This test executes
 * the method twice. The first execution blocks its own child thread (TC1). The second execution
 * blocks until TC1 is completed (based on how executeWithInitialResults is implemented). A third
 * thread is then awakened and releases the latch in the testhook so that TC1 can complete.
 *
 * @throws Exception if the test fails unexpectedly
 */
@Test
public void testMultipleExecuteWithInitialResults() throws Exception {
  final int numObjects = 200;
  final int totalObjects = 500;
  final Host host = Host.getHost(0);
  VM server = host.getVM(0);
  VM client = host.getVM(1);
  client.invoke(setTestHook());
  final String cqName = "testMultiExecuteWithInitialResults";
  // initialize server and retrieve host and port values
  cqDUnitTest.createServer(server);
  final int port = server.invoke(() -> CqQueryDUnitTest.getCacheServerPort());
  final String host0 = NetworkUtils.getServerHostName(server.getHost());
  // Initialize Client.
  cqDUnitTest.createClient(client, port, host0);
  // create CQ.
  cqDUnitTest.createCQ(client, cqName, cqDUnitTest.cqs[0]);
  // initialize Region with the first batch of entries.
  server.invoke(new CacheSerializableRunnable("Update Region") {

    @Override
    public void run2() throws CacheException {
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      for (int i = 1; i <= numObjects; i++) {
        Portfolio p = new Portfolio(i);
        region.put("" + i, p);
      }
    }
  });
  // Keep updating region (async invocation).
  server.invokeAsync(new CacheSerializableRunnable("Update Region") {

    @Override
    public void run2() throws CacheException {
      // Wait to give client a chance to register the cq
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      for (int i = numObjects + 1; i <= totalObjects; i++) {
        Portfolio p = new Portfolio(i);
        region.put("" + i, p);
      }
    }
  });
  // the thread that validates all results and executes first
  AsyncInvocation processCqs =
      client.invokeAsync(new CacheSerializableRunnable("Execute CQ first") {

        @Override
        public void run2() throws CacheException {
          SelectResults cqResults = null;
          QueryService cqService = getCache().getQueryService();
          // Get CqQuery object.
          CqQuery cq1 = cqService.getCq(cqName);
          if (cq1 == null) {
            fail("Failed to get CQ " + cqName);
          }
          try {
            cqResults = cq1.executeWithInitialResults();
          } catch (Exception e) {
            AssertionError err = new AssertionError("Failed to execute CQ " + cqName);
            err.initCause(e);
            throw err;
          }
          CqQueryTestListener cqListener =
              (CqQueryTestListener) cq1.getCqAttributes().getCqListener();
          // Wait for the last key to arrive.
          cqListener.waitForCreated("" + totalObjects);
          // Check if the events from CqListener are in order.
          int oldId = 0;
          for (Object cqEvent : cqListener.events.toArray()) {
            // Integer.parseInt avoids the deprecated new Integer(String) boxing constructor.
            int newId = Integer.parseInt(cqEvent.toString());
            if (oldId > newId) {
              fail("Queued events for CQ Listener during execution with "
                  + "Initial results is not in the order in which they are created.");
            }
            oldId = newId;
          }
          // Check if all the IDs are present as part of Select Results and CQ
          // Events.
          HashSet ids = new HashSet(cqListener.events);
          for (Object o : cqResults.asList()) {
            Struct s = (Struct) o;
            ids.add(s.get("key"));
          }
          HashSet missingIds = new HashSet();
          String key = "";
          for (int i = 1; i <= totalObjects; i++) {
            key = "" + i;
            if (!(ids.contains(key))) {
              missingIds.add(key);
            }
          }
          if (!missingIds.isEmpty()) {
            fail("Missing Keys in either ResultSet or the Cq Event list. "
                + " Missing keys : [size : " + missingIds.size() + "]" + missingIds
                + " Ids in ResultSet and CQ Events :" + ids);
          }
        }
      });
  // the second call to executeWithInitialResults. Goes to sleep hopefully
  // long enough for the first call to executeWithInitialResults first
  client.invokeAsync(new CacheSerializableRunnable("Execute CQ second") {

    @Override
    public void run2() throws CacheException {
      try {
        Thread.sleep(2000);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      QueryService cqService = getCache().getQueryService();
      // Get CqQuery object.
      CqQuery cq1 = cqService.getCq(cqName);
      if (cq1 == null) {
        fail("Failed to get CQ " + cqName);
      }
      try {
        cq1.executeWithInitialResults();
      } catch (IllegalStateException e) {
        // we expect an error due to the cq having already being in run state
      } catch (Exception e) {
        AssertionError err = new AssertionError("test hook lock interrupted" + cqName);
        err.initCause(e);
        throw err;
      }
    }
  });
  // thread that unlatches the test hook, sleeping long enough for both
  // the other two threads to execute first
  client.invokeAsync(new CacheSerializableRunnable("Release latch") {

    @Override
    public void run2() throws CacheException {
      // Sleep so both executeWithInitialResults calls have
      // had a chance to invoke executeWithInitialResults
      try {
        Thread.sleep(5000);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        AssertionError err = new AssertionError("test hook lock interrupted" + cqName);
        err.initCause(e);
        throw err;
      }
      CqQueryImpl.testHook.ready();
    }
  });
  // wait for 60 seconds for test to complete
  ThreadUtils.join(processCqs, 60 * 1000);
  // FIX: ThreadUtils.join does not rethrow exceptions from the AsyncInvocation,
  // so explicitly surface any assertion failure raised on the validation thread.
  if (processCqs.exceptionOccurred()) {
    AssertionError err = new AssertionError("CQ validation thread failed for " + cqName);
    err.initCause(processCqs.getException());
    throw err;
  }
  // Close.
  cqDUnitTest.closeClient(client);
  cqDUnitTest.closeServer(server);
}
Example use of org.apache.geode.test.dunit.AsyncInvocation in the Apache Geode project:
the verifyDestroyAllIndexesWhileDoingPuts method of the LuceneIndexDestroyDUnitTest class.
@Test
@Parameters(method = "getListOfRegionTestTypes")
public void verifyDestroyAllIndexesWhileDoingPuts(RegionTestableType regionType) throws Exception {
  // RegionDestroyedExceptions are expected while the indexes are torn down under load.
  IgnoredException.addIgnoredException(RegionDestroyedException.class.getSimpleName());

  // Set up both data stores with the indexes and the region under test,
  // then confirm the indexes exist on each member.
  dataStore1.invoke(() -> initDataStore(createIndexes(), regionType));
  dataStore2.invoke(() -> initDataStore(createIndexes(), regionType));
  dataStore1.invoke(() -> verifyIndexesCreated());
  dataStore2.invoke(() -> verifyIndexesCreated());

  // Kick off a background put workload and block until it is actually running.
  AsyncInvocation putThread = dataStore1.invokeAsync(() -> doPutsUntilStopped());
  dataStore1.invoke(() -> waitUntilPutsHaveStarted());

  // Destroying the indexes on one member is sufficient; verify removal on both.
  dataStore1.invoke(() -> destroyIndexes());
  dataStore1.invoke(() -> verifyIndexesDestroyed());
  dataStore2.invoke(() -> verifyIndexesDestroyed());

  // Stop the workload, wait for it to finish, and surface any exception it hit.
  dataStore1.invoke(() -> stopPuts());
  ThreadUtils.join(putThread, 60 * 1000);
  if (putThread.exceptionOccurred()) {
    fail(putThread.getException());
  }

  // Finally, confirm the region contents survived the index destruction.
  dataStore1.invoke(() -> verifyRegionSize());
}
Example use of org.apache.geode.test.dunit.AsyncInvocation in the Apache Geode project:
the preTearDown method of the WANTestBase class.
@Override
public final void preTearDown() throws Exception {
  // Clean up the controller VM first, then fire off cleanup in every DUnit VM in parallel.
  cleanupVM();
  final Host host = Host.getHost(0);
  final List<AsyncInvocation> cleanups = new ArrayList<>();
  for (int vmIndex = 0; vmIndex < host.getVMCount(); vmIndex++) {
    cleanups.add(host.getVM(vmIndex).invokeAsync(() -> WANTestBase.cleanupVM()));
  }
  // Wait for each cleanup to finish and rethrow any failure it produced.
  for (AsyncInvocation cleanup : cleanups) {
    cleanup.join();
    cleanup.checkException();
  }
}
Aggregations