Usage example of org.apache.geode.cache.query.SelectResults in the Apache Geode project: class QueryUtils, method getRelationshipIndexResultsMergedWithIntermediateResults.
/**
* This function is used to evaluate a filter evaluatable CompositeCondition(ie Range Indexes
* available on both LHS & RHS operands).This function is invoked from AND junction evaluation of
* CompositeGroupJunction. It expands the intermediate resultset passed , to the level of groups
* determined by the LHS & RHS operand, using the range indexes. It is possible that the group of
* iterators for an operand of condition already exists in the intermediate resultset passed. In
* such situation, the intermediate resultset is iterated & the operand ( whose group of iterators
* are available in the intermediate resultset ) is evaluated. For each such evaluated value , the
* other operand's Range Index is queried & the Range Index's results are appropriately expanded &
* cut down & a final tuple obtained( which includes the previously existing fields of
* intermediate resultset). The array of independent iterators passed from the Composite Group
* junction will be null, except for the final condition ( subject to the fact that complete
* expansion flag is false. Otherwise even for final condition , the array will be null) as that
* array will be used to get the final position of iterators in the resultant StructBag
*
* TODO: break this method up
*
* @param intermediateResults SelectResults object containing the intermediate resultset obtained
* by evaluation of previous filter evaluatable composite conditions of the
* CompositeGroupJunction
* @param indxInfo Array of IndexInfo objects ( size 2), representing the range index for the two
* operands of the condition
* @param context ExecutionContext object
* @param completeExpansionNeeded A boolean when true indicates that the final result from
* Composite GroupJunction needs to be evaluated to the query from clause ( top ) level.
* @param iterOperands CompiledValue representing the conditions which are to be iter evaluated.
* This can exist only if instead of AllGroupJunction we have a single
* CompositeGroupJunction
* @param indpdntItrs Array of RuntimeIterators representing the independent iterators of their
* representative groups forming the CompositeGroupJunction *
* @return SelectResults The Result object created by evaluating the filter evaluatable condition
* merged with the intermediate results
*/
static SelectResults getRelationshipIndexResultsMergedWithIntermediateResults(SelectResults intermediateResults, IndexInfo[] indxInfo, ExecutionContext context, boolean completeExpansionNeeded, CompiledValue iterOperands, RuntimeIterator[] indpdntItrs) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException {
// Determine how many fields each index's result set contributes (1 for a
// plain result set, N for a struct result set).
ObjectType resultType1 = indxInfo[0]._index.getResultSetType();
int indexFieldsSize1 = resultType1 instanceof StructType ? ((StructTypeImpl) resultType1).getFieldNames().length : 1;
ObjectType resultType2 = indxInfo[1]._index.getResultSetType();
int indexFieldsSize2 = resultType2 instanceof StructType ? ((StructTypeImpl) resultType2).getFieldNames().length : 1;
/*
 * even if the complete expansion is needed pass the flag of complete expansion as false. Thus
 * for LHS & RHS we will get the expansionList for that individual group.
 */
// NOTE: use false for completeExpansion irrespective of actual value
IndexConditioningHelper ich1 = new IndexConditioningHelper(indxInfo[0], context, indexFieldsSize1, false, iterOperands, null);
// NOTE: use false for completeExpansion irrespective of actual value
IndexConditioningHelper ich2 = new IndexConditioningHelper(indxInfo[1], context, indexFieldsSize2, false, iterOperands, null);
// We cannot have a condition where in intermediateResultset is empty
// or null & complete
// expansion flag true because in that case instead of this function we should
// have called
// noOfIndexesToUse: 2 => both indexes usable (equijoin both), 1 => only one
// usable (the other operand's group is already in the intermediate results),
// 0 => neither usable (fold condition into iter-operand evaluation).
int noOfIndexesToUse = intermediateResults == null || intermediateResults.isEmpty() ? 2 : 0;
RuntimeIterator[] resultFieldsItrMapping = null;
List allItrs = context.getCurrentIterators();
IndexConditioningHelper singleUsableICH = null;
IndexConditioningHelper nonUsableICH = null;
// finalList: the ordered iterators defining the fields of the result
// StructBag. Null here only when indpdntItrs != null (computed later from it).
List finalList = completeExpansionNeeded ? allItrs : indpdntItrs == null ? new ArrayList() : null;
// the set will contain those iterators which we don't have to expand to either because they are
// already present ( because of intermediate results or because index result already contains
// them
Set expnItrsToIgnore = null;
if (noOfIndexesToUse == 0) {
// If the intermediate Resultset is not empty then check if the resultset
// fields of intermediate
// resultset contains any independent iterator of the current condition
noOfIndexesToUse = 2;
StructType stype = (StructType) intermediateResults.getCollectionType().getElementType();
String[] fieldNames = stype.getFieldNames();
int len = fieldNames.length;
resultFieldsItrMapping = new RuntimeIterator[len];
String fieldName = null;
String lhsID = ich1.indpndntItr.getInternalId();
String rhsID = ich2.indpndntItr.getInternalId();
for (int i = 0; i < len; ++i) {
fieldName = fieldNames[i];
// If an operand's independent iterator already appears as a field of the
// intermediate results, that operand's index is no longer usable; the
// OTHER operand's index becomes the single usable one.
if (noOfIndexesToUse != 0) {
if (fieldName.equals(lhsID)) {
--noOfIndexesToUse;
singleUsableICH = ich2;
nonUsableICH = ich1;
} else if (fieldName.equals(rhsID)) {
--noOfIndexesToUse;
singleUsableICH = ich1;
nonUsableICH = ich2;
}
}
// NOTE(review): field names are assumed to encode the iterator's 1-based
// position after their first 4 characters (internal id format) — confirm
// against RuntimeIterator.getInternalId.
int pos = Integer.parseInt(fieldName.substring(4));
RuntimeIterator itrPrsntInIntermdtRes = (RuntimeIterator) allItrs.get(pos - 1);
resultFieldsItrMapping[i] = itrPrsntInIntermdtRes;
// the iterator below is already present in resultset so needs to be ignored for expansion
if (completeExpansionNeeded) {
if (expnItrsToIgnore == null) {
expnItrsToIgnore = new HashSet();
}
expnItrsToIgnore.add(itrPrsntInIntermdtRes);
} else if (indpdntItrs == null) {
// We will need to know the intermediate iterators so as to know
// the final list which will be used to obtain the correct structset.
// But if the independent group of iterators is passed, the final list needs
// to be calculated
// on its basis
finalList.add(itrPrsntInIntermdtRes);
}
}
// Both operands' groups already present => no index is usable at all.
if (noOfIndexesToUse == 0) {
singleUsableICH = null;
}
}
QueryObserver observer = QueryObserverHolder.getInstance();
if (noOfIndexesToUse == 2) {
// Case 1: both range indexes usable — run a relationship (equijoin) query
// across the two indexes, then expand/cut down each tuple and optionally
// merge with the intermediate results.
List data = null;
try {
ArrayList resultData = new ArrayList();
observer.beforeIndexLookup(indxInfo[0]._index, OQLLexerTokenTypes.TOK_EQ, null);
observer.beforeIndexLookup(indxInfo[1]._index, OQLLexerTokenTypes.TOK_EQ, null);
if (context.getBucketList() != null) {
data = queryEquijoinConditionBucketIndexes(indxInfo, context);
} else {
data = indxInfo[0]._index.queryEquijoinCondition(indxInfo[1]._index, context);
}
} finally {
observer.afterIndexLookup(data);
}
// For sure we need to evaluate both the conditions & expand it only to
// its own respective
// Ignore the boolean of reshuffling needed etc for this case
List totalExpList = new ArrayList();
totalExpList.addAll(ich1.expansionList);
totalExpList.addAll(ich2.expansionList);
if (completeExpansionNeeded) {
if (expnItrsToIgnore == null) {
// The expnItrsToIgnore set being null at this point implies that though complete
// expansion flag is true but intermediate result set is empty
Support.Assert(intermediateResults == null || intermediateResults.isEmpty(), "expnItrsToIgnore should not have been null if the intermediate result set is not empty");
expnItrsToIgnore = new HashSet();
}
expnItrsToIgnore.addAll(ich1.finalList);
expnItrsToIgnore.addAll(ich2.finalList);
// identify the iterators which we need to expand to
// TODO: Make the code compact by using a common function to take care of this
int size = finalList.size();
for (int i = 0; i < size; ++i) {
RuntimeIterator currItr = (RuntimeIterator) finalList.get(i);
// If the runtimeIterators of scope not present in CheckSet add it to the expansion list
if (!expnItrsToIgnore.contains(currItr)) {
totalExpList.add(currItr);
}
}
} else {
// struct set mismatch while doing intersection with GroupJunction results
if (indpdntItrs != null) {
finalList = getDependentItrChainForIndpndntItrs(indpdntItrs, context);
} else {
finalList.addAll(ich1.finalList);
finalList.addAll(ich2.finalList);
}
}
List[] checkList = new List[] { ich1.checkList, ich2.checkList };
StructType stype = createStructTypeForRuntimeIterators(finalList);
SelectResults returnSet = QueryUtils.createStructCollection(context, stype);
RuntimeIterator[][] mappings = new RuntimeIterator[2][];
mappings[0] = ich1.indexFieldToItrsMapping;
mappings[1] = ich2.indexFieldToItrsMapping;
// NOTE(review): totalCheckList duplicates checkList above — looks redundant;
// confirm whether the two are mutated independently downstream.
List[] totalCheckList = new List[] { ich1.checkList, ich2.checkList };
RuntimeIterator[][] resultMappings = new RuntimeIterator[1][];
resultMappings[0] = resultFieldsItrMapping;
Iterator dataItr = data.iterator();
IndexCutDownExpansionHelper[] icdeh = new IndexCutDownExpansionHelper[] { new IndexCutDownExpansionHelper(ich1.checkList, context), new IndexCutDownExpansionHelper(ich2.checkList, context) };
ListIterator expansionListIterator = totalExpList.listIterator();
if (dataItr.hasNext()) {
observer = QueryObserverHolder.getInstance();
try {
observer.beforeMergeJoinOfDoubleIndexResults(indxInfo[0]._index, indxInfo[1]._index, data);
boolean doMergeWithIntermediateResults = intermediateResults != null && !intermediateResults.isEmpty();
int maxCartesianDepth = totalExpList.size() + (doMergeWithIntermediateResults ? 1 : 0);
while (dataItr.hasNext()) {
// TODO: Change the code in range Index so that while collecting data instead of
// creating two dimensional object array , we create one dimensional Object array of
// size 2, & each elemnt stores an Object array
Object[][] values = (Object[][]) dataItr.next();
// made by different data in the other set)
if (doMergeWithIntermediateResults) {
mergeRelationshipIndexResultsWithIntermediateResults(returnSet, new SelectResults[] { intermediateResults }, resultMappings, values, mappings, expansionListIterator, finalList, context, checkList, iterOperands, icdeh, 0, maxCartesianDepth);
} else {
mergeAndExpandCutDownRelationshipIndexResults(values, returnSet, mappings, expansionListIterator, finalList, context, totalCheckList, iterOperands, icdeh, 0);
}
// NOTE(review): only icdeh[0]'s check set is reset per tuple here;
// icdeh[1] is presumably reset inside the merge helpers — confirm.
if (icdeh[0].cutDownNeeded)
icdeh[0].checkSet.clear();
}
} finally {
observer.afterMergeJoinOfDoubleIndexResults(returnSet);
}
}
return returnSet;
} else if (noOfIndexesToUse == 1) {
// Case 2: exactly one index usable — iterate the intermediate result set,
// bind its fields to the corresponding iterators, evaluate the non-usable
// operand's path as a key, and probe the usable index with it.
// There exists one independent iterator in the current condition which is also a part of the
// intermediate resultset Identify the final List which will depend upon the complete
// expansion flag Identify the iterators to be expanded to, which will also depend upon
// complete expansion flag..
List totalExpList = new ArrayList();
totalExpList.addAll(singleUsableICH.expansionList);
if (completeExpansionNeeded) {
Support.Assert(expnItrsToIgnore != null, "expnItrsToIgnore should not have been null as we are in this block itself indicates that intermediate results was not null");
expnItrsToIgnore.addAll(singleUsableICH.finalList);
// identify the iterators which we need to expand to
// TODO: Make the code compact by using a common function to take care of this
int size = finalList.size();
for (int i = 0; i < size; ++i) {
RuntimeIterator currItr = (RuntimeIterator) finalList.get(i);
// If the runtimeIterators of scope not present in CheckSet add it to the expansion list
if (!expnItrsToIgnore.contains(currItr)) {
totalExpList.add(currItr);
}
}
} else {
// struct set mismatch while doing intersection with GroupJunction results
if (indpdntItrs != null) {
finalList = getDependentItrChainForIndpndntItrs(indpdntItrs, context);
} else {
finalList.addAll(singleUsableICH.finalList);
}
}
StructType stype = createStructTypeForRuntimeIterators(finalList);
SelectResults returnSet = QueryUtils.createStructCollection(context, stype);
// Obtain the empty resultset for the single usable index
IndexProtocol singleUsblIndex = singleUsableICH.indxInfo._index;
CompiledValue nonUsblIndxPath = nonUsableICH.indxInfo._path;
ObjectType singlUsblIndxResType = singleUsblIndex.getResultSetType();
SelectResults singlUsblIndxRes = null;
if (singlUsblIndxResType instanceof StructType) {
singlUsblIndxRes = QueryUtils.createStructCollection(context, (StructTypeImpl) singlUsblIndxResType);
} else {
singlUsblIndxRes = QueryUtils.createResultCollection(context, singlUsblIndxResType);
}
// iterate over the intermediate structset
Iterator intrmdtRsItr = intermediateResults.iterator();
observer = QueryObserverHolder.getInstance();
try {
observer.beforeIndexLookup(singleUsblIndex, OQLLexerTokenTypes.TOK_EQ, null);
observer.beforeIterJoinOfSingleIndexResults(singleUsblIndex, nonUsableICH.indxInfo._index);
while (intrmdtRsItr.hasNext()) {
Struct strc = (Struct) intrmdtRsItr.next();
Object[] val = strc.getFieldValues();
int len = val.length;
// Bind each intermediate-result field to its runtime iterator so the
// non-usable operand's path can be evaluated in this context.
for (int i = 0; i < len; ++i) {
resultFieldsItrMapping[i].setCurrent(val[i]);
}
// TODO: Issue relevant index use callbacks to QueryObserver
Object key = nonUsblIndxPath.evaluate(context);
// TODO: Check this logic out
// Skip UNDEFINED keys — an equality probe on UNDEFINED matches nothing.
if (key != null && key.equals(QueryService.UNDEFINED)) {
continue;
}
singleUsblIndex.query(key, OQLLexerTokenTypes.TOK_EQ, singlUsblIndxRes, context);
cutDownAndExpandIndexResults(returnSet, singlUsblIndxRes, singleUsableICH.indexFieldToItrsMapping, totalExpList, finalList, context, singleUsableICH.checkList, iterOperands, singleUsableICH.indxInfo);
// Reuse the scratch result set for the next probe.
singlUsblIndxRes.clear();
}
} finally {
observer.afterIterJoinOfSingleIndexResults(returnSet);
observer.afterIndexLookup(returnSet);
}
return returnSet;
} else {
// Case 3: neither index usable — reconstruct the condition as an iter-
// evaluatable comparison and fold it into the cartesian over the
// intermediate results.
// PART OF ITER OPERANDS
if (logger.isDebugEnabled()) {
StringBuilder tempBuffLhs = new StringBuilder();
StringBuilder tempBuffRhs = new StringBuilder();
ich1.indxInfo._path.generateCanonicalizedExpression(tempBuffLhs, context);
ich2.indxInfo._path.generateCanonicalizedExpression(tempBuffRhs, context);
logger.debug("For better performance indexes are not used for the condition {} = {}", tempBuffLhs, tempBuffRhs);
}
CompiledValue reconstructedVal = new CompiledComparison(ich1.indxInfo._path, ich2.indxInfo._path, OQLLexerTokenTypes.TOK_EQ);
// Add this reconstructed value to the iter operand if any
CompiledValue finalVal = reconstructedVal;
if (iterOperands != null) {
// The type of CompiledJunction has to be AND junction as this function gets invoked only
// for AND . Also it is OK if we have iterOperands which itself is a CompiledJunction. We
// can have a tree of CompiledJunction with its operands being a CompiledComparison & a
// CompiledJunction. We can live without creating a flat structure
finalVal = new CompiledJunction(new CompiledValue[] { iterOperands, reconstructedVal }, OQLLexerTokenTypes.LITERAL_and);
}
RuntimeIterator[][] resultMappings = new RuntimeIterator[1][];
resultMappings[0] = resultFieldsItrMapping;
return cartesian(new SelectResults[] { intermediateResults }, resultMappings, Collections.emptyList(), finalList, context, finalVal);
}
}
Usage example of org.apache.geode.cache.query.SelectResults in the Apache Geode project: class QueryUtils, method doNestedIterations.
// TODO:Optimize the function further in terms of reducing the
// parameters passed in the function, if possible
/**
 * Recursively builds the cartesian product of the given result sets and the
 * expansion iterators, adding one tuple (or value) to {@code returnSet} for
 * each combination that satisfies {@code operand}.
 *
 * Levels 0..results.length-1 bind fields of the corresponding result set to
 * their runtime iterators; levels beyond that evaluate successive expansion
 * iterators. Recursion bottoms out at {@code finalLevel}, where the current
 * bindings are evaluated against {@code finalItrs} to form the output row.
 */
private static void doNestedIterations(int level, SelectResults returnSet, SelectResults[] results, RuntimeIterator[][] itrsForResultFields, List finalItrs, ListIterator expansionItrs, int finalLevel, ExecutionContext context, CompiledValue operand) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException {
if (level == finalLevel) {
// base case: all iterators bound — apply the filter and emit a row
boolean select = true;
if (operand != null) {
select = applyCondition(operand, context);
}
Iterator itr = finalItrs.iterator();
int len = finalItrs.size();
if (len > 1) {
// multiple final iterators => the return set is a struct collection
Object[] values = new Object[len];
int j = 0;
while (itr.hasNext()) {
values[j++] = ((RuntimeIterator) itr.next()).evaluate(context);
}
if (select) {
((StructFields) returnSet).addFieldValues(values);
}
} else {
// single final iterator => plain result collection
if (select)
returnSet.add(((RuntimeIterator) itr.next()).evaluate(context));
}
} else if (level < results.length) {
// bind the fields of the level-th result set to their iterators and recurse
SelectResults individualResultSet = results[level];
RuntimeIterator[] itrsForFields = itrsForResultFields[level];
int len = itrsForFields.length;
for (Object anIndividualResultSet : individualResultSet) {
// Check if query execution on this thread is canceled.
QueryMonitor.isQueryExecutionCanceled();
if (len == 1) {
// this means we have a ResultSet
itrsForFields[0].setCurrent(anIndividualResultSet);
} else {
Struct struct = (Struct) anIndividualResultSet;
Object[] fieldValues = struct.getFieldValues();
int size = fieldValues.length;
for (int i = 0; i < size; ++i) {
itrsForFields[i].setCurrent(fieldValues[i]);
}
}
doNestedIterations(level + 1, returnSet, results, itrsForResultFields, finalItrs, expansionItrs, finalLevel, context, operand);
}
} else {
// expansion phase: evaluate the next expansion iterator's collection and
// recurse once per element; previous() restores the ListIterator position
// so sibling recursive calls consume the same iterator
RuntimeIterator currLevel = (RuntimeIterator) expansionItrs.next();
SelectResults c = currLevel.evaluateCollection(context);
if (c == null) {
// nothing to expand at this level (e.g. dependent collection is null)
expansionItrs.previous();
return;
}
for (Object aC : c) {
currLevel.setCurrent(aC);
doNestedIterations(level + 1, returnSet, results, itrsForResultFields, finalItrs, expansionItrs, finalLevel, context, operand);
}
expansionItrs.previous();
}
}
Usage example of org.apache.geode.cache.query.SelectResults in the Apache Geode project: class DerivedInfo, method derivedDerivative.
/**
 * Replays the remaining derived-join operations once for each value in a
 * previously derived result set.
 *
 * @param idDerivedAndResults two-element array: [0] the derived id (String),
 *        [1] the SelectResults produced for that id
 * @param context the current query execution context
 * @param expansionList candidate runtime iterators; the one matching the
 *        derived id is bound to each result value in turn
 */
private void derivedDerivative(Object[] idDerivedAndResults, ExecutionContext context, List expansionList) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException {
String derivedId = (String) idDerivedAndResults[0];
SelectResults derivedResults = (SelectResults) idDerivedAndResults[1];
RuntimeIterator matchingItr = getMatchingRuntimeIterator(derivedId, expansionList);
List opsToReplay = this.getRemainingOps();
// Bind each derived value to the matching iterator and re-run the
// remaining operations against it.
for (Object derivedValue : derivedResults) {
matchingItr.setCurrent(derivedValue);
createDerivedJoinResultsFromOpsList(derivedId, context, opsToReplay);
}
}
Usage example of org.apache.geode.cache.query.SelectResults in the Apache Geode project: class ProxyJUnitTest, method testRegionMethods.
/**
 * Confirms the behavior of region (non-map) methods on an EMPTY (proxy)
 * region: lifecycle operations, entry operations, callback (CacheWriter /
 * CacheListener) invocations, and cache statistics counters.
 */
@Test
public void testRegionMethods() throws Exception {
Object cbArg = new Object();
AttributesFactory af = new AttributesFactory();
af.setDataPolicy(DataPolicy.EMPTY);
setCallbacks(af);
clearCallbackState();
ExpectedRegionEvent expre = new ExpectedRegionEvent();
// --- region creation fires a REGION_CREATE listener event, no writer ---
assertEquals(0, getStats().getRegions());
Region r = this.c.createRegion("r", af.create());
assertEquals(1, getStats().getRegions());
expre.r = r;
expre.op = Operation.REGION_CREATE;
expre.cbArg = null;
checkNoCW();
checkCL(expre);
// --- basic accessors ---
assertEquals("r", r.getName());
assertEquals("/r", r.getFullPath());
assertEquals(null, r.getParentRegion());
assertEquals(DataPolicy.EMPTY, r.getAttributes().getDataPolicy());
r.getAttributesMutator();
try {
r.getStatistics();
fail("expected StatisticsDisabledException");
} catch (StatisticsDisabledException expected) {
// because they were not enabled in the region attributes
}
// --- invalidateRegion variants (distributed and local, with/without arg) ---
r.invalidateRegion();
expre.op = Operation.REGION_INVALIDATE;
expre.cbArg = null;
checkNoCW();
checkCL(expre);
r.invalidateRegion(cbArg);
expre.cbArg = cbArg;
checkNoCW();
checkCL(expre);
r.localInvalidateRegion();
expre.op = Operation.REGION_LOCAL_INVALIDATE;
expre.cbArg = null;
checkNoCW();
checkCL(expre);
r.localInvalidateRegion(cbArg);
expre.cbArg = cbArg;
checkNoCW();
checkCL(expre);
// --- destroyRegion: invokes both writer and listener ---
r.destroyRegion();
assertEquals(true, r.isDestroyed());
assertEquals(0, getStats().getRegions());
expre.op = Operation.REGION_DESTROY;
expre.cbArg = null;
checkCW(expre);
checkCL(expre);
r = this.c.createRegion("r", af.create());
expre.r = r;
expre.op = Operation.REGION_CREATE;
expre.cbArg = null;
checkNoCW();
checkCL(expre);
r.destroyRegion(cbArg);
assertEquals(0, getStats().getRegions());
assertEquals(true, r.isDestroyed());
expre.op = Operation.REGION_DESTROY;
expre.cbArg = cbArg;
checkCW(expre);
checkCL(expre);
// --- localDestroyRegion: no writer, but writer/listener are closed ---
r = this.c.createRegion("r", af.create());
expre.r = r;
expre.op = Operation.REGION_CREATE;
expre.cbArg = null;
checkNoCW();
checkCL(expre);
r.localDestroyRegion();
assertEquals(0, getStats().getRegions());
assertEquals(true, r.isDestroyed());
expre.op = Operation.REGION_LOCAL_DESTROY;
expre.cbArg = null;
checkNoCW();
checkCWClosed();
checkCLClosed();
checkCL(expre);
r = this.c.createRegion("r", af.create());
expre.r = r;
expre.op = Operation.REGION_CREATE;
expre.cbArg = null;
checkNoCW();
checkCL(expre);
r.localDestroyRegion(cbArg);
assertEquals(0, getStats().getRegions());
assertEquals(true, r.isDestroyed());
expre.op = Operation.REGION_LOCAL_DESTROY;
expre.cbArg = cbArg;
checkNoCW();
checkCWClosed();
checkCLClosed();
checkCL(expre);
// --- close(): like local destroy, also closes callbacks ---
r = this.c.createRegion("r", af.create());
expre.r = r;
expre.op = Operation.REGION_CREATE;
expre.cbArg = null;
checkNoCW();
checkCL(expre);
r.close();
assertEquals(0, getStats().getRegions());
assertEquals(true, r.isDestroyed());
expre.op = Operation.REGION_CLOSE;
expre.cbArg = null;
checkNoCW();
checkCWClosed();
checkCLClosed();
checkCL(expre);
// --- snapshot operations are unsupported on a proxy region ---
r = this.c.createRegion("r", af.create());
assertEquals(1, getStats().getRegions());
expre.r = r;
expre.op = Operation.REGION_CREATE;
expre.cbArg = null;
checkNoCW();
checkCL(expre);
try {
r.saveSnapshot(System.out);
fail("expected UnsupportedOperationException");
} catch (UnsupportedOperationException expected) {
}
try {
r.loadSnapshot(System.in);
fail("expected UnsupportedOperationException");
} catch (UnsupportedOperationException expected) {
}
// --- subregion lifecycle ---
{
Region sr = r.createSubregion("sr", af.create());
assertEquals(2, getStats().getRegions());
expre.r = sr;
expre.op = Operation.REGION_CREATE;
expre.cbArg = null;
checkNoCW();
checkCL(expre);
assertEquals("sr", sr.getName());
assertEquals("/r/sr", sr.getFullPath());
assertEquals(r, sr.getParentRegion());
assertEquals(sr, r.getSubregion("sr"));
assertEquals(Collections.singleton(sr), r.subregions(false));
sr.close();
assertEquals(1, getStats().getRegions());
expre.op = Operation.REGION_CLOSE;
expre.cbArg = null;
checkNoCW();
checkCWClosed();
checkCLClosed();
checkCL(expre);
assertEquals(true, sr.isDestroyed());
assertEquals(null, r.getSubregion("sr"));
assertEquals(Collections.EMPTY_SET, r.subregions(false));
}
// --- entry operations: snapshot current stat counters first ---
ExpectedEntryEvent expee = new ExpectedEntryEvent();
expee.r = r;
expee.key = "key";
int creates = getStats().getCreates();
// int puts = getStats().getPuts();
// int updates = getStats().getUpdates();
int destroys = getStats().getDestroys();
int invalidates = getStats().getInvalidates();
int gets = getStats().getGets();
int misses = getStats().getMisses();
// put on an empty region counts as a CREATE (nothing is stored locally)
r.put("key", "value", cbArg);
expee.op = Operation.CREATE;
creates++;
assertEquals(creates, getStats().getCreates());
expee.cbArg = cbArg;
expee.newValue = "value";
checkCW(expee);
checkCL(expee);
// note on a non-proxy region create after put fails with EntryExistsException
r.create("key", "value", cbArg);
creates++;
assertEquals(creates, getStats().getCreates());
expee.op = Operation.CREATE;
expee.cbArg = cbArg;
expee.newValue = "value";
checkCW(expee);
checkCL(expee);
// proxy region never stores data: get always misses
assertEquals(null, r.getEntry("key"));
assertEquals(null, r.get("key", cbArg));
gets++;
assertEquals(gets, getStats().getGets());
misses++;
assertEquals(misses, getStats().getMisses());
checkNoCW();
checkNoCL();
// --- invalidate: distributed variant fires listener; local variant throws ---
r.invalidate("key");
invalidates++;
assertEquals(invalidates, getStats().getInvalidates());
expee.op = Operation.INVALIDATE;
expee.cbArg = null;
expee.newValue = null;
checkNoCW();
checkCL(expee);
r.invalidate("key", cbArg);
invalidates++;
assertEquals(invalidates, getStats().getInvalidates());
expee.op = Operation.INVALIDATE;
expee.cbArg = cbArg;
expee.newValue = null;
checkNoCW();
checkCL(expee);
try {
r.localInvalidate("key");
fail("expected EntryNotFoundException");
} catch (EntryNotFoundException expected) {
}
try {
r.localInvalidate("key", cbArg);
fail("expected EntryNotFoundException");
} catch (EntryNotFoundException expected) {
}
assertEquals(invalidates, getStats().getInvalidates());
checkNoCW();
checkNoCL();
// --- destroy: distributed variant fires writer+listener; local throws ---
r.destroy("key");
destroys++;
assertEquals(destroys, getStats().getDestroys());
expee.op = Operation.DESTROY;
expee.cbArg = null;
expee.newValue = null;
checkCW(expee);
checkCL(expee);
r.destroy("key", cbArg);
destroys++;
assertEquals(destroys, getStats().getDestroys());
expee.op = Operation.DESTROY;
expee.cbArg = cbArg;
expee.newValue = null;
checkCW(expee);
checkCL(expee);
try {
r.localDestroy("key");
fail("expected EntryNotFoundException");
} catch (EntryNotFoundException expected) {
}
try {
r.localDestroy("key", cbArg);
fail("expected EntryNotFoundException");
} catch (EntryNotFoundException expected) {
}
assertEquals(destroys, getStats().getDestroys());
checkNoCW();
checkNoCL();
// --- misc accessors on the (always empty) proxy region ---
assertEquals(Collections.EMPTY_SET, r.keySet());
assertEquals(Collections.EMPTY_SET, r.entrySet(true));
assertEquals(this.c, r.getCache());
r.setUserAttribute(cbArg);
assertEquals(cbArg, r.getUserAttribute());
checkNoCW();
checkNoCL();
r.put("key", "value", cbArg);
creates++;
assertEquals(creates, getStats().getCreates());
expee.op = Operation.CREATE;
expee.cbArg = cbArg;
expee.newValue = "value";
checkCW(expee);
checkCL(expee);
// --- queries against the empty proxy region return no results ---
assertEquals(false, r.containsValueForKey("key"));
assertEquals(false, r.existsValue("this = 'value'"));
{
SelectResults sr = r.query("this = 'value'");
assertEquals(Collections.EMPTY_SET, sr.asSet());
}
assertEquals(null, r.selectValue("this = 'value'"));
// --- scope/disk-dependent operations fail on this configuration ---
try {
r.getRegionDistributedLock();
fail("expected IllegalStateException");
} catch (IllegalStateException expected) {
// because we are not global
}
try {
r.getDistributedLock("key");
fail("expected IllegalStateException");
} catch (IllegalStateException expected) {
// because we are not global
}
try {
r.becomeLockGrantor();
fail("expected IllegalStateException");
} catch (IllegalStateException expected) {
// because we are not global
}
try {
r.writeToDisk();
fail("expected IllegalStateException");
} catch (IllegalStateException expected) {
// because we are not configured for disk
}
checkNoCW();
checkNoCL();
// check to see if a local loader works
{
CacheLoader cl = new CacheLoader() {
public Object load(LoaderHelper helper) throws CacheLoaderException {
return "loadedValue";
}
public void close() {
}
};
r.getAttributesMutator().setCacheLoader(cl);
// the load still counts as a miss on a proxy region, but produces a
// LOCAL_LOAD_CREATE event with the loaded value
r.get("key", cbArg);
gets++;
assertEquals(gets, getStats().getGets());
misses++;
assertEquals(misses, getStats().getMisses());
expee.op = Operation.LOCAL_LOAD_CREATE;
expee.newValue = "loadedValue";
checkCW(expee);
checkCL(expee);
r.getAttributesMutator().setCacheLoader(null);
}
}
Usage example of org.apache.geode.cache.query.SelectResults in the Apache Geode project: class CompiledInDUnitTest, method verifyQuery.
/**
 * Executes the given OQL query with the supplied bind arguments and asserts
 * that the result set contains exactly {@code numExpectedEntries} entries.
 *
 * @param numExpectedEntries expected size of the query's result set
 * @param queryString the OQL query text
 * @param bindArguments bind parameters passed to {@link Query#execute(Object[])}
 */
void verifyQuery(final int numExpectedEntries, final String queryString, Object[] bindArguments) {
QueryService qs = getCache().getQueryService();
SelectResults sr = null;
try {
// query local is scoped to the try block; the unused iterator from the
// original implementation has been removed
Query query = qs.newQuery(queryString);
sr = (SelectResults) query.execute(bindArguments);
} catch (Exception ex) {
ex.printStackTrace();
// include the exception type: getMessage() alone can be null/uninformative
Assert.fail("Failed to execute query, " + ex);
}
Assert.assertEquals(numExpectedEntries, sr.size());
}
End of aggregated SelectResults usage examples.