Use of org.apache.geode.cache.execute.RegionFunctionContext in project geode by apache.
In class PRColocatedEquiJoinDUnitTest, method testPRRRNonLocalQueryingWithNoRROnOneNode:
/**
 * A basic dunit test that:
 * 1. Creates a PR data store on two VMs and a colocated local region on only one of them.
 * 2. Populates the regions with test data.
 * 3. Fires an equi-join query on the PR through a function and verifies that the execution
 *    fails with a RegionNotFoundException on the node that does not host the local region.
 *
 * @throws Exception
 */
@Test
public void testPRRRNonLocalQueryingWithNoRROnOneNode() throws Exception {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  setCacheInVMs(vm0, vm1);
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
  // Creating PRs on the participating VMs.
  // Creating the DataStore nodes on VM0 and VM1.
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
  vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, 0, Portfolio.class));
  vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, 0, Portfolio.class));
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
  // Creating the colocated region DataStore node on VM0 only.
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the RR");
  vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(coloName, NewPortfolio.class));
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
  final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
  final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
  // Putting the data into the regions created above.
  vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, cnt, cntDest));
  vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio, cnt, cntDest));
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
  // Querying the VM for data and comparing the result with the query result of the local region.
  vm0.invoke(new CacheSerializableRunnable("PRQuery") {
    @Override
    public void run2() throws CacheException {
      // Helper class: a function that runs the join query on the member hosting the data.
      class TestQueryFunction extends FunctionAdapter {
        private final String id;

        public TestQueryFunction(String id) {
          super();
          this.id = id;
        }

        @Override
        public boolean hasResult() {
          return true;
        }

        @Override
        public boolean isHA() {
          return false;
        }

        @Override
        public void execute(FunctionContext context) {
          Cache cache = CacheFactory.getAnyInstance();
          QueryService queryService = cache.getQueryService();
          ArrayList allQueryResults = new ArrayList();
          String qstr = (String) context.getArguments();
          try {
            Query query = queryService.newQuery(qstr);
            // Executing the query against the RegionFunctionContext scopes it to the local data.
            context.getResultSender().sendResult(
                (ArrayList) ((SelectResults) query.execute((RegionFunctionContext) context)).asList());
            context.getResultSender().lastResult(null);
          } catch (Exception e) {
            e.printStackTrace();
            throw new FunctionException(e);
          }
        }

        @Override
        public String getId() {
          return this.id;
        }
      }
      Cache cache = getCache();
      // Querying the PR region.
      String[] queries = new String[] { "r1.ID = r2.id" };
      Object[][] r = new Object[queries.length][2];
      Region region = null;
      region = cache.getRegion(name);
      assertNotNull(region);
      region = cache.getRegion(coloName);
      assertNotNull(region);
      QueryService qs = getCache().getQueryService();
      Object[] params;
      try {
        for (int j = 0; j < queries.length; j++) {
          getCache().getLogger().info("About to execute local query: " + queries[j]);
          Function func = new TestQueryFunction("testfunction");
          Object funcResult = FunctionService
              .onRegion((getCache().getRegion(name) instanceof PartitionedRegion)
                  ? getCache().getRegion(name) : getCache().getRegion(coloName))
              .setArguments("Select " + (queries[j].contains("ORDER BY") ? "DISTINCT" : "")
                  + " * from /" + name + " r1, /" + coloName + " r2 where " + queries[j])
              .execute(func).getResult();
          r[j][0] = ((ArrayList) funcResult).get(0);
        }
        fail("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully without RR region on one of the nodes");
      } catch (FunctionException e) {
        if (e.getCause() instanceof RegionNotFoundException) {
          LogWriterUtils.getLogWriter().info("Query received FunctionException successfully while using QueryService.");
        } else {
          fail("RegionNotFoundException must be thrown here");
        }
      }
    }
  });
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
}
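Stripped of the dunit scaffolding, the calling side of the pattern above reduces to routing an OQL equi-join through FunctionService.onRegion so the join is evaluated on the members hosting the partitioned data. A minimal, hypothetical sketch, assuming a top-level equivalent of the TestQueryFunction above and two colocated regions named "portfolios" and "newPortfolios" (illustrative names, not from the test):

import java.util.List;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.execute.ResultCollector;

public class ColocatedJoinCaller {

  @SuppressWarnings("unchecked")
  public static List<Object> runJoin(Cache cache) {
    // Target the partitioned region; the colocated region is referenced only inside the query.
    Region<Object, Object> pr = cache.getRegion("portfolios");
    Execution execution = FunctionService.onRegion(pr)
        .setArguments("SELECT * FROM /portfolios r1, /newPortfolios r2 WHERE r1.ID = r2.id");
    // Assumes a top-level TestQueryFunction equivalent to the local class in the test above.
    ResultCollector<?, ?> rc = execution.execute(new TestQueryFunction("testfunction"));
    // The default collector aggregates every sendResult()/lastResult() payload from each member.
    return (List<Object>) rc.getResult();
  }
}

The test wraps this call in a try/fail/catch because, with the colocated region missing on one member, getResult() surfaces a FunctionException whose cause is the RegionNotFoundException raised remotely.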
Use of org.apache.geode.cache.execute.RegionFunctionContext in project geode by apache.
In class TestFunction, method executeWithLastResult:
private void executeWithLastResult(FunctionContext context) {
  RegionFunctionContext rfContext = (RegionFunctionContext) context;
  final PartitionedRegion pr = (PartitionedRegion) rfContext.getDataSet();
  ResourceManager resMan = pr.getCache().getResourceManager();
  RebalanceFactory factory = resMan.createRebalanceFactory();
  RebalanceOperation rebalanceOp = factory.start();
  try {
    // getResults() blocks until the rebalance started above has completed.
    RebalanceResults rebalanceResults = rebalanceOp.getResults();
  } catch (CancellationException e) {
    e.printStackTrace();
  } catch (InterruptedException e) {
    e.printStackTrace();
  }
  context.getResultSender().lastResult((Serializable) context.getArguments());
}
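Here the RegionFunctionContext is used only to reach the partitioned region and its cache; the rebalance-and-wait portion is ordinary ResourceManager usage. As a standalone, hedged sketch (not part of TestFunction, method name illustrative):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.control.RebalanceOperation;
import org.apache.geode.cache.control.RebalanceResults;

public class RebalanceSketch {

  public static void rebalanceAndWait() throws InterruptedException {
    Cache cache = CacheFactory.getAnyInstance();
    // Start a cluster-wide rebalance of partitioned region buckets.
    RebalanceOperation op = cache.getResourceManager().createRebalanceFactory().start();
    // getResults() blocks until the rebalance has finished.
    RebalanceResults results = op.getResults();
    System.out.println("Buckets transferred: " + results.getTotalBucketTransfersCompleted());
  }
}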
Use of org.apache.geode.cache.execute.RegionFunctionContext in project geode by apache.
In class TestFunction, method execute4:
public void execute4(FunctionContext context) {
  if (context instanceof RegionFunctionContext) {
    RegionFunctionContext prContext = (RegionFunctionContext) context;
    prContext.getDataSet().getCache().getLogger()
        .info("Executing function : TestFunction4-7.execute " + prContext);
    if (prContext.getArguments() instanceof Boolean) {
      /* return prContext.getArguments(); */
      if (hasResult())
        prContext.getResultSender().lastResult((Serializable) prContext.getArguments());
    } else if (prContext.getArguments() instanceof String) {
      String key = (String) prContext.getArguments();
      /* return (Serializable)PartitionRegionHelper.getLocalDataForContext(prContext).get(key); */
      prContext.getResultSender()
          .lastResult((Serializable) PartitionRegionHelper.getLocalDataForContext(prContext).get(key));
    } else if (prContext.getArguments() instanceof Set) {
      Set origKeys = (Set) prContext.getArguments();
      ArrayList vals = new ArrayList();
      for (Iterator i = origKeys.iterator(); i.hasNext(); ) {
        Object val = PartitionRegionHelper.getLocalDataForContext(prContext).get(i.next());
        if (val != null) {
          vals.add(val);
        }
      }
      // prContext.getResultSender().sendResult(vals);
      if (hasResult())
        prContext.getResultSender().lastResult(vals);
    } else if (prContext.getArguments() instanceof HashMap) {
      HashMap putData = (HashMap) prContext.getArguments();
      for (Iterator i = putData.entrySet().iterator(); i.hasNext(); ) {
        Map.Entry me = (Map.Entry) i.next();
        prContext.getDataSet().put(me.getKey(), me.getValue());
      }
      // prContext.getResultSender().sendResult(Boolean.TRUE);
      if (hasResult())
        prContext.getResultSender().lastResult(Boolean.TRUE);
    } else {
      // prContext.getResultSender().sendResult(Boolean.FALSE);
      if (hasResult())
        prContext.getResultSender().lastResult(Boolean.FALSE);
    }
  } else {
    // context.getResultSender().sendResult(Boolean.FALSE);
    if (hasResult())
      context.getResultSender().lastResult(Boolean.FALSE);
  }
}
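The heart of the Set branch above is PartitionRegionHelper.getLocalDataForContext, which hands the function a view restricted to the data hosted by the executing member, so each member answers only for its own buckets instead of issuing distributed gets. A hedged, standalone sketch of just that piece (class name and id are illustrative, not from TestFunction):

import java.util.ArrayList;
import java.util.Set;

import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionContext;
import org.apache.geode.cache.execute.RegionFunctionContext;
import org.apache.geode.cache.partition.PartitionRegionHelper;

public class LocalGetFunction implements Function {

  @Override
  public void execute(FunctionContext context) {
    RegionFunctionContext rfc = (RegionFunctionContext) context;
    // A view over the partitioned region restricted to data hosted on this member.
    Region<Object, Object> localData = PartitionRegionHelper.getLocalDataForContext(rfc);
    ArrayList<Object> values = new ArrayList<>();
    for (Object key : (Set<?>) rfc.getArguments()) {
      Object value = localData.get(key);
      if (value != null) {
        values.add(value);
      }
    }
    rfc.getResultSender().lastResult(values);
  }

  @Override
  public boolean hasResult() {
    return true;
  }

  @Override
  public boolean isHA() {
    return false;
  }

  @Override
  public boolean optimizeForWrite() {
    return false;
  }

  @Override
  public String getId() {
    return "LocalGetFunction";
  }
}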
Use of org.apache.geode.cache.execute.RegionFunctionContext in project geode by apache.
In class DistributedRegionFunction, method execute:
@Override
public void execute(FunctionContext context) {
  RegionFunctionContext rcontext = (RegionFunctionContext) context;
  Region<Object, Object> region = rcontext.getDataSet();
  InternalDistributedSystem sys = InternalDistributedSystem.getConnectedInstance();
  sys.getLogWriter().fine("DistributedRegionFunction#execute( " + rcontext + " )");
  Assert.assertTrue(region.getAttributes().getDataPolicy().withStorage());
  Assert.assertTrue(region.getAttributes().getDataPolicy() != DataPolicy.NORMAL);
  Assert.assertTrue(rcontext.getFilter().size() == 20);
  long startTime = System.currentTimeMillis();
  // the body itself
  if (Boolean.TRUE.equals(rcontext.getArguments())) {
    // do not close cache in retry
    if (!rcontext.isPossibleDuplicate()) {
      sys.disconnect();
      throw new CacheClosedException("Throwing CacheClosedException " + "to simulate failover during function exception");
    }
  } else {
    WaitCriterion wc = new WaitCriterion() {
      String excuse;

      public boolean done() {
        return false;
      }

      public String description() {
        return excuse;
      }
    };
    Wait.waitForCriterion(wc, 12000, 500, false);
  }
  long endTime = System.currentTimeMillis();
  // intentionally doing region operation to cause cacheClosedException
  region.put("execKey-201", new Integer(201));
  if (rcontext.isPossibleDuplicate()) {
    // Below operation is done when the function is reexecuted
    region.put("execKey-202", new Integer(202));
    region.put("execKey-203", new Integer(203));
  }
  sys.getLogWriter().fine("Time wait for Function Execution = " + (endTime - startTime));
  for (int i = 0; i < 5000; i++) {
    context.getResultSender().sendResult(Boolean.TRUE);
  }
  context.getResultSender().lastResult(Boolean.TRUE);
}
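The function above streams 5000 intermediate results with sendResult() and closes the stream with lastResult(); its assertions also pin down what the caller must provide, namely a region with storage and a filter of exactly 20 keys. A hedged sketch of a matching caller (region and key names are illustrative, and a no-argument DistributedRegionFunction constructor is assumed):

import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.execute.ResultCollector;

public class DistributedRegionFunctionCaller {

  @SuppressWarnings("unchecked")
  public static List<Boolean> invoke(Region<String, Integer> region) {
    // The function asserts a filter of exactly 20 keys.
    Set<String> filter = new HashSet<>();
    for (int i = 100; i < 120; i++) {
      filter.add("execKey-" + i);
    }
    Execution execution = FunctionService.onRegion(region)
        .withFilter(filter)
        .setArguments(Boolean.FALSE); // TRUE would make the function simulate a cache close
    ResultCollector<?, ?> rc = execution.execute(new DistributedRegionFunction());
    // Blocks until lastResult() has been received from every executing member.
    return (List<Boolean>) rc.getResult();
  }
}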
Use of org.apache.geode.cache.execute.RegionFunctionContext in project geode by apache.
In class TestFunction, method executeWithThrowException:
private void executeWithThrowException(FunctionContext context) {
  DistributedSystem ds = InternalDistributedSystem.getAnyInstance();
  RegionFunctionContext rfContext = (RegionFunctionContext) context;
  LogWriter logger = ds.getLogWriter();
  logger.fine("Executing executeWithThrowException in TestFunction on Member : "
      + ds.getDistributedMember() + " with Context : " + context);
  if (context.getArguments() instanceof Boolean) {
    logger.fine("MyFunctionExecutionException Exception is intentionally thrown");
    throw new MyFunctionExecutionException("I have been thrown from TestFunction");
  } else if (rfContext.getArguments() instanceof Set) {
    Set origKeys = (Set) rfContext.getArguments();
    for (Iterator i = origKeys.iterator(); i.hasNext(); ) {
      Region r = PartitionRegionHelper.getLocalDataForContext(rfContext);
      Object key = i.next();
      Object val = r.get(key);
      if (val != null) {
        throw new MyFunctionExecutionException("I have been thrown from TestFunction");
      }
    }
  } else {
    logger.fine("Result sent back :" + Boolean.FALSE);
    rfContext.getResultSender().lastResult(Boolean.FALSE);
  }
}
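On the calling side, an exception thrown inside execute, such as the MyFunctionExecutionException above, is not delivered as a result; it surfaces from the result collection wrapped in a FunctionException, as the first snippet in this listing also relies on. A hedged sketch of that caller-side handling (class name and wiring are illustrative):

import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionException;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.execute.ResultCollector;

public class ThrowingFunctionCaller {

  public static void invoke(Region<String, String> region, Function function) {
    try {
      ResultCollector<?, ?> rc = FunctionService.onRegion(region)
          .setArguments(Boolean.TRUE) // a Boolean argument makes executeWithThrowException throw
          .execute(function);
      rc.getResult(); // remote failures typically surface here
    } catch (FunctionException e) {
      // The original MyFunctionExecutionException is available as the cause.
      System.out.println("Function failed with: " + e.getCause());
    }
  }
}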