use of org.apache.geode.cache.query.internal.index.IndexProtocol in project geode by apache.
the class CompiledUndefined method getIndexInfo.
public IndexInfo[] getIndexInfo(ExecutionContext context) throws TypeMismatchException, AmbiguousNameException, NameResolutionException {
IndexInfo[] indexInfo = privGetIndexInfo(context);
if (indexInfo != null) {
if (indexInfo == NO_INDEXES_IDENTIFIER) {
return null;
} else {
return indexInfo;
}
}
if (!IndexUtils.indexesEnabled)
return null;
// TODO: Asif: If the condition uses a primary key index and its key IS DEFINED,
// are we returning all the values of the region? And if the key is UNDEFINED,
// are we returning an empty set?
IndexData indexData = QueryUtils.getAvailableIndexIfAny(this._value, context, _is_defined ? TOK_NE : TOK_EQ);
IndexProtocol index = null;
IndexInfo[] newIndexInfo = null;
if (indexData != null) {
index = indexData.getIndex();
}
if (index != null && index.isValid()) {
newIndexInfo = new IndexInfo[1];
/*
* Pass the key as null because it is not of type CompiledValue (it is
* QueryService.UNDEFINED).
*/
newIndexInfo[0] = new IndexInfo(null, this._value, index, indexData.getMatchLevel(), indexData.getMapping(), _is_defined ? TOK_NE : TOK_EQ);
}
if (newIndexInfo != null) {
privSetIndexInfo(newIndexInfo, context);
} else {
privSetIndexInfo(NO_INDEXES_IDENTIFIER, context);
}
return newIndexInfo;
}
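getIndexInfo memoizes the outcome of the index lookup in the ExecutionContext: a real IndexInfo[] means an index was found earlier, the shared NO_INDEXES_IDENTIFIER array records that a previous lookup found nothing, and null means no lookup has been attempted yet. A minimal, self-contained sketch of that three-state caching pattern follows; IndexInfoCache and lookupIndex are hypothetical stand-ins for Geode's privGetIndexInfo/privSetIndexInfo and QueryUtils.getAvailableIndexIfAny, not Geode API.
import java.util.HashMap;
import java.util.Map;

class IndexInfoCache {
  // Shared sentinel meaning "we already looked and found no index".
  private static final String[] NO_INDEXES_IDENTIFIER = new String[0];
  private final Map<String, String[]> cache = new HashMap<>();

  String[] getIndexInfo(String conditionKey) {
    String[] cached = cache.get(conditionKey);
    if (cached != null) {
      // Cache hit: the sentinel turns back into "no index available" (null).
      return cached == NO_INDEXES_IDENTIFIER ? null : cached;
    }
    String[] found = lookupIndex(conditionKey); // the expensive search, done at most once per key
    cache.put(conditionKey, found != null ? found : NO_INDEXES_IDENTIFIER);
    return found;
  }

  // Hypothetical placeholder for the real index search.
  private String[] lookupIndex(String conditionKey) {
    return null;
  }
}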
use of org.apache.geode.cache.query.internal.index.IndexProtocol in project geode by apache.
the class QueryUtils method getRelationshipIndexIfAny.
/**
* Returns the pair of range indexes available for a composite condition (an equi-join across
* regions). It either returns both indexes or returns null.
*
* @param lhs one operand of the equi-join condition
* @param rhs the other operand of the equi-join condition
* @param context the ExecutionContext object
* @param operator the operator, which must be equality ('=')
* @return an array of two IndexData objects, index 0 for the lhs operand and index 1 for the
* rhs operand, or null if both indexes are not available
*/
static IndexData[] getRelationshipIndexIfAny(CompiledValue lhs, CompiledValue rhs, ExecutionContext context, int operator) throws AmbiguousNameException, TypeMismatchException, NameResolutionException {
if (operator != OQLLexerTokenTypes.TOK_EQ) {
// Operator must be '='
return null;
}
// Do not use PrimaryKey Index
IndexData lhsIndxData = QueryUtils.getAvailableIndexIfAny(lhs, context, false);
if (lhsIndxData == null) {
return null;
}
// Do not use PrimaryKey Index
IndexData rhsIndxData = QueryUtils.getAvailableIndexIfAny(rhs, context, false);
if (rhsIndxData == null) {
// release the lock held on lhsIndex as it will not be used
Index index = lhsIndxData.getIndex();
Index prIndex = ((AbstractIndex) index).getPRIndex();
if (prIndex != null) {
((PartitionedIndex) prIndex).releaseIndexReadLockForRemove();
} else {
((AbstractIndex) index).releaseIndexReadLockForRemove();
}
return null;
}
IndexProtocol lhsIndx = lhsIndxData.getIndex();
IndexProtocol rhsIndx = rhsIndxData.getIndex();
if (lhsIndx.isValid() && rhsIndx.isValid()) {
return new IndexData[] { lhsIndxData, rhsIndxData };
}
return null;
}
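Note the lock discipline here: getAvailableIndexIfAny takes a read lock on each index it returns, so when only the LHS index is found, that lock has to be released before returning null. The shape is acquire-the-pair-or-release-the-first; below is a small self-contained sketch using java.util.concurrent locks in place of Geode's index read locks (the helper and its names are illustrative, not Geode API).
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class PairedReadLocks {
  /** Returns both read locks held, or null with neither held. */
  static Lock[] acquirePair(ReentrantReadWriteLock lhs, ReentrantReadWriteLock rhs) {
    Lock left = lhs.readLock();
    if (!left.tryLock()) {
      return null; // nothing acquired, nothing to release
    }
    Lock right = rhs.readLock();
    if (!right.tryLock()) {
      left.unlock(); // the pair is unusable, so release the lock already held
      return null;
    }
    return new Lock[] { left, right }; // the caller releases both when done
  }
}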
use of org.apache.geode.cache.query.internal.index.IndexProtocol in project geode by apache.
the class QueryUtils method queryEquijoinConditionBucketIndexes.
static List queryEquijoinConditionBucketIndexes(IndexInfo[] indxInfo, ExecutionContext context) throws QueryInvocationTargetException, TypeMismatchException, FunctionDomainException, NameResolutionException {
List resultData = new ArrayList();
AbstractIndex index0 = (AbstractIndex) indxInfo[0]._index;
AbstractIndex index1 = (AbstractIndex) indxInfo[1]._index;
PartitionedRegion pr0 = null;
if (index0.getRegion() instanceof BucketRegion) {
pr0 = ((Bucket) index0.getRegion()).getPartitionedRegion();
}
PartitionedRegion pr1 = null;
if (index1.getRegion() instanceof BucketRegion) {
pr1 = ((Bucket) index1.getRegion()).getPartitionedRegion();
}
List data = null;
IndexProtocol i0 = null;
IndexProtocol i1 = null;
for (Object b : context.getBucketList()) {
i0 = pr0 != null ? PartitionedIndex.getBucketIndex(pr0, index0.getName(), (Integer) b) : indxInfo[0]._index;
i1 = pr1 != null ? PartitionedIndex.getBucketIndex(pr1, index1.getName(), (Integer) b) : indxInfo[1]._index;
if (i0 == null || i1 == null) {
continue;
}
data = i0.queryEquijoinCondition(i1, context);
resultData.addAll(data);
}
data = resultData;
return data;
}
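For partitioned regions, the equi-join runs bucket by bucket: each bucket id in the context's bucket list selects the bucket-local index on both sides, buckets missing on either side are skipped, and the per-bucket join results are concatenated. A self-contained sketch of that fan-out-and-collect shape, with a hypothetical joinBucket standing in for IndexProtocol.queryEquijoinCondition:
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

class BucketJoin {
  static <R> List<R> joinAcrossBuckets(List<Integer> bucketIds,
                                       Map<Integer, List<R>> lhsBuckets,
                                       Map<Integer, List<R>> rhsBuckets) {
    List<R> results = new ArrayList<>();
    for (Integer bucketId : bucketIds) {
      List<R> lhs = lhsBuckets.get(bucketId);
      List<R> rhs = rhsBuckets.get(bucketId);
      if (lhs == null || rhs == null) {
        continue; // bucket not hosted on one side: skip it, as the Geode loop does
      }
      results.addAll(joinBucket(lhs, rhs));
    }
    return results;
  }

  // Hypothetical per-bucket join (here a simple intersection); Geode delegates this step
  // to the bucket-local index via queryEquijoinCondition.
  static <R> List<R> joinBucket(List<R> lhs, List<R> rhs) {
    List<R> out = new ArrayList<>(lhs);
    out.retainAll(rhs);
    return out;
  }
}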
use of org.apache.geode.cache.query.internal.index.IndexProtocol in project geode by apache.
the class QueryUtils method getRelationshipIndexResultsMergedWithIntermediateResults.
/**
* Evaluates a filter-evaluatable composite condition (i.e. one with range indexes available on
* both the LHS and RHS operands). It is invoked from the AND-junction evaluation of a
* CompositeGroupJunction. It expands the intermediate result set passed in to the level of the
* groups determined by the LHS and RHS operands, using the range indexes. The group of iterators
* for one operand of the condition may already be present in the intermediate result set; in
* that case the intermediate result set is iterated and that operand is evaluated. For each
* evaluated value, the other operand's range index is queried, and the index results are
* expanded and cut down as needed to produce the final tuple (which includes the pre-existing
* fields of the intermediate result set). The array of independent iterators passed from the
* CompositeGroupJunction is null except for the final condition, and only when the complete
* expansion flag is false (otherwise it is null even for the final condition); that array is
* used to determine the final position of the iterators in the resulting StructBag.
*
* TODO: break this method up
*
* @param intermediateResults SelectResults containing the intermediate result set obtained by
* evaluating the previous filter-evaluatable composite conditions of the
* CompositeGroupJunction
* @param indxInfo array of two IndexInfo objects representing the range indexes for the two
* operands of the condition
* @param context the ExecutionContext object
* @param completeExpansionNeeded when true, the final result of the CompositeGroupJunction must
* be expanded to the level of the query's FROM clause (top level)
* @param iterOperands CompiledValue representing the conditions to be iter-evaluated; this can
* exist only if there is a single CompositeGroupJunction instead of an AllGroupJunction
* @param indpdntItrs array of RuntimeIterators representing the independent iterators of the
* groups forming the CompositeGroupJunction
* @return the SelectResults created by evaluating the filter-evaluatable condition merged with
* the intermediate results
*/
static SelectResults getRelationshipIndexResultsMergedWithIntermediateResults(SelectResults intermediateResults, IndexInfo[] indxInfo, ExecutionContext context, boolean completeExpansionNeeded, CompiledValue iterOperands, RuntimeIterator[] indpdntItrs) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException {
ObjectType resultType1 = indxInfo[0]._index.getResultSetType();
int indexFieldsSize1 = resultType1 instanceof StructType ? ((StructTypeImpl) resultType1).getFieldNames().length : 1;
ObjectType resultType2 = indxInfo[1]._index.getResultSetType();
int indexFieldsSize2 = resultType2 instanceof StructType ? ((StructTypeImpl) resultType2).getFieldNames().length : 1;
/*
* Even if complete expansion is needed, pass the complete-expansion flag as false so that for
* the LHS and RHS we get the expansion list for each individual group.
*/
// NOTE: use false for completeExpansion irrespective of actual value
IndexConditioningHelper ich1 = new IndexConditioningHelper(indxInfo[0], context, indexFieldsSize1, false, iterOperands, null);
// NOTE: use false for completeExpansion irrespective of actual value
IndexConditioningHelper ich2 = new IndexConditioningHelper(indxInfo[1], context, indexFieldsSize2, false, iterOperands, null);
// We cannot have a situation where the intermediate result set is empty or null while the
// complete-expansion flag is true, because in that case a different code path would have been
// taken instead of this function.
int noOfIndexesToUse = intermediateResults == null || intermediateResults.isEmpty() ? 2 : 0;
RuntimeIterator[] resultFieldsItrMapping = null;
List allItrs = context.getCurrentIterators();
IndexConditioningHelper singleUsableICH = null;
IndexConditioningHelper nonUsableICH = null;
List finalList = completeExpansionNeeded ? allItrs : indpdntItrs == null ? new ArrayList() : null;
// This set will contain those iterators which we do not have to expand to, either because they
// are already present in the intermediate results or because the index results already contain
// them.
Set expnItrsToIgnore = null;
if (noOfIndexesToUse == 0) {
// If the intermediate result set is not empty, check whether its result fields contain any
// independent iterator of the current condition.
noOfIndexesToUse = 2;
StructType stype = (StructType) intermediateResults.getCollectionType().getElementType();
String[] fieldNames = stype.getFieldNames();
int len = fieldNames.length;
resultFieldsItrMapping = new RuntimeIterator[len];
String fieldName = null;
String lhsID = ich1.indpndntItr.getInternalId();
String rhsID = ich2.indpndntItr.getInternalId();
for (int i = 0; i < len; ++i) {
fieldName = fieldNames[i];
if (noOfIndexesToUse != 0) {
if (fieldName.equals(lhsID)) {
--noOfIndexesToUse;
singleUsableICH = ich2;
nonUsableICH = ich1;
} else if (fieldName.equals(rhsID)) {
--noOfIndexesToUse;
singleUsableICH = ich1;
nonUsableICH = ich2;
}
}
int pos = Integer.parseInt(fieldName.substring(4));
RuntimeIterator itrPrsntInIntermdtRes = (RuntimeIterator) allItrs.get(pos - 1);
resultFieldsItrMapping[i] = itrPrsntInIntermdtRes;
// The iterator below is already present in the result set, so it must be ignored for expansion.
if (completeExpansionNeeded) {
if (expnItrsToIgnore == null) {
expnItrsToIgnore = new HashSet();
}
expnItrsToIgnore.add(itrPrsntInIntermdtRes);
} else if (indpdntItrs == null) {
// We need to know the intermediate iterators in order to build the final list used to obtain
// the correct struct set. But if the independent group of iterators is passed, the final list
// must be calculated from that instead.
finalList.add(itrPrsntInIntermdtRes);
}
}
if (noOfIndexesToUse == 0) {
singleUsableICH = null;
}
}
QueryObserver observer = QueryObserverHolder.getInstance();
if (noOfIndexesToUse == 2) {
List data = null;
try {
ArrayList resultData = new ArrayList();
observer.beforeIndexLookup(indxInfo[0]._index, OQLLexerTokenTypes.TOK_EQ, null);
observer.beforeIndexLookup(indxInfo[1]._index, OQLLexerTokenTypes.TOK_EQ, null);
if (context.getBucketList() != null) {
data = queryEquijoinConditionBucketIndexes(indxInfo, context);
} else {
data = indxInfo[0]._index.queryEquijoinCondition(indxInfo[1]._index, context);
}
} finally {
observer.afterIndexLookup(data);
}
// Both conditions need to be evaluated, each expanded only to its own respective group.
// Ignore the reshuffling-needed flag etc. for this case.
List totalExpList = new ArrayList();
totalExpList.addAll(ich1.expansionList);
totalExpList.addAll(ich2.expansionList);
if (completeExpansionNeeded) {
if (expnItrsToIgnore == null) {
// expnItrsToIgnore being null at this point implies that, although the complete-expansion
// flag is true, the intermediate result set is empty.
Support.Assert(intermediateResults == null || intermediateResults.isEmpty(), "expnItrsToIgnore should not have been null if the intermediate result set is not empty");
expnItrsToIgnore = new HashSet();
}
expnItrsToIgnore.addAll(ich1.finalList);
expnItrsToIgnore.addAll(ich2.finalList);
// identify the iterators which we need to expand to
// TODO: Make the code compact by using a common function to take care of this
int size = finalList.size();
for (int i = 0; i < size; ++i) {
RuntimeIterator currItr = (RuntimeIterator) finalList.get(i);
// If a RuntimeIterator of the scope is not present in the ignore set, add it to the expansion list.
if (!expnItrsToIgnore.contains(currItr)) {
totalExpList.add(currItr);
}
}
} else {
// Avoid a struct set mismatch when intersecting with the GroupJunction results.
if (indpdntItrs != null) {
finalList = getDependentItrChainForIndpndntItrs(indpdntItrs, context);
} else {
finalList.addAll(ich1.finalList);
finalList.addAll(ich2.finalList);
}
}
List[] checkList = new List[] { ich1.checkList, ich2.checkList };
StructType stype = createStructTypeForRuntimeIterators(finalList);
SelectResults returnSet = QueryUtils.createStructCollection(context, stype);
RuntimeIterator[][] mappings = new RuntimeIterator[2][];
mappings[0] = ich1.indexFieldToItrsMapping;
mappings[1] = ich2.indexFieldToItrsMapping;
List[] totalCheckList = new List[] { ich1.checkList, ich2.checkList };
RuntimeIterator[][] resultMappings = new RuntimeIterator[1][];
resultMappings[0] = resultFieldsItrMapping;
Iterator dataItr = data.iterator();
IndexCutDownExpansionHelper[] icdeh = new IndexCutDownExpansionHelper[] { new IndexCutDownExpansionHelper(ich1.checkList, context), new IndexCutDownExpansionHelper(ich2.checkList, context) };
ListIterator expansionListIterator = totalExpList.listIterator();
if (dataItr.hasNext()) {
observer = QueryObserverHolder.getInstance();
try {
observer.beforeMergeJoinOfDoubleIndexResults(indxInfo[0]._index, indxInfo[1]._index, data);
boolean doMergeWithIntermediateResults = intermediateResults != null && !intermediateResults.isEmpty();
int maxCartesianDepth = totalExpList.size() + (doMergeWithIntermediateResults ? 1 : 0);
while (dataItr.hasNext()) {
// TODO: Change the range index code so that while collecting data, instead of creating a
// two-dimensional Object array, we create a one-dimensional Object array of size 2 where
// each element stores an Object array.
Object[][] values = (Object[][]) dataItr.next();
if (doMergeWithIntermediateResults) {
mergeRelationshipIndexResultsWithIntermediateResults(returnSet, new SelectResults[] { intermediateResults }, resultMappings, values, mappings, expansionListIterator, finalList, context, checkList, iterOperands, icdeh, 0, maxCartesianDepth);
} else {
mergeAndExpandCutDownRelationshipIndexResults(values, returnSet, mappings, expansionListIterator, finalList, context, totalCheckList, iterOperands, icdeh, 0);
}
if (icdeh[0].cutDownNeeded)
icdeh[0].checkSet.clear();
}
} finally {
observer.afterMergeJoinOfDoubleIndexResults(returnSet);
}
}
return returnSet;
} else if (noOfIndexesToUse == 1) {
// There exists one independent iterator in the current condition which is also part of the
// intermediate result set. Identify the final list, which depends on the complete-expansion
// flag, and identify the iterators to be expanded to, which also depend on that flag.
List totalExpList = new ArrayList();
totalExpList.addAll(singleUsableICH.expansionList);
if (completeExpansionNeeded) {
Support.Assert(expnItrsToIgnore != null, "expnItrsToIgnore should not be null; reaching this block indicates that the intermediate results were not empty");
expnItrsToIgnore.addAll(singleUsableICH.finalList);
// identify the iterators which we need to expand to
// TODO: Make the code compact by using a common function to take care of this
int size = finalList.size();
for (int i = 0; i < size; ++i) {
RuntimeIterator currItr = (RuntimeIterator) finalList.get(i);
// If a RuntimeIterator of the scope is not present in the ignore set, add it to the expansion list.
if (!expnItrsToIgnore.contains(currItr)) {
totalExpList.add(currItr);
}
}
} else {
// Avoid a struct set mismatch when intersecting with the GroupJunction results.
if (indpdntItrs != null) {
finalList = getDependentItrChainForIndpndntItrs(indpdntItrs, context);
} else {
finalList.addAll(singleUsableICH.finalList);
}
}
StructType stype = createStructTypeForRuntimeIterators(finalList);
SelectResults returnSet = QueryUtils.createStructCollection(context, stype);
// Obtain the empty resultset for the single usable index
IndexProtocol singleUsblIndex = singleUsableICH.indxInfo._index;
CompiledValue nonUsblIndxPath = nonUsableICH.indxInfo._path;
ObjectType singlUsblIndxResType = singleUsblIndex.getResultSetType();
SelectResults singlUsblIndxRes = null;
if (singlUsblIndxResType instanceof StructType) {
singlUsblIndxRes = QueryUtils.createStructCollection(context, (StructTypeImpl) singlUsblIndxResType);
} else {
singlUsblIndxRes = QueryUtils.createResultCollection(context, singlUsblIndxResType);
}
// iterate over the intermediate structset
Iterator intrmdtRsItr = intermediateResults.iterator();
observer = QueryObserverHolder.getInstance();
try {
observer.beforeIndexLookup(singleUsblIndex, OQLLexerTokenTypes.TOK_EQ, null);
observer.beforeIterJoinOfSingleIndexResults(singleUsblIndex, nonUsableICH.indxInfo._index);
while (intrmdtRsItr.hasNext()) {
Struct strc = (Struct) intrmdtRsItr.next();
Object[] val = strc.getFieldValues();
int len = val.length;
for (int i = 0; i < len; ++i) {
resultFieldsItrMapping[i].setCurrent(val[i]);
}
// TODO: Issue relevant index use callbacks to QueryObserver
Object key = nonUsblIndxPath.evaluate(context);
// TODO: Check this logic out
if (key != null && key.equals(QueryService.UNDEFINED)) {
continue;
}
singleUsblIndex.query(key, OQLLexerTokenTypes.TOK_EQ, singlUsblIndxRes, context);
cutDownAndExpandIndexResults(returnSet, singlUsblIndxRes, singleUsableICH.indexFieldToItrsMapping, totalExpList, finalList, context, singleUsableICH.checkList, iterOperands, singleUsableICH.indxInfo);
singlUsblIndxRes.clear();
}
} finally {
observer.afterIterJoinOfSingleIndexResults(returnSet);
observer.afterIndexLookup(returnSet);
}
return returnSet;
} else {
// PART OF ITER OPERANDS
if (logger.isDebugEnabled()) {
StringBuilder tempBuffLhs = new StringBuilder();
StringBuilder tempBuffRhs = new StringBuilder();
ich1.indxInfo._path.generateCanonicalizedExpression(tempBuffLhs, context);
ich2.indxInfo._path.generateCanonicalizedExpression(tempBuffRhs, context);
logger.debug("For better performance indexes are not used for the condition {} = {}", tempBuffLhs, tempBuffRhs);
}
CompiledValue reconstructedVal = new CompiledComparison(ich1.indxInfo._path, ich2.indxInfo._path, OQLLexerTokenTypes.TOK_EQ);
// Add this reconstructed value to the iter operand if any
CompiledValue finalVal = reconstructedVal;
if (iterOperands != null) {
// The CompiledJunction has to be an AND junction, as this function is invoked only for AND.
// It is also OK if iterOperands is itself a CompiledJunction: we can have a tree of
// CompiledJunctions whose operands are a CompiledComparison and a CompiledJunction, so there
// is no need to create a flat structure.
finalVal = new CompiledJunction(new CompiledValue[] { iterOperands, reconstructedVal }, OQLLexerTokenTypes.LITERAL_and);
}
RuntimeIterator[][] resultMappings = new RuntimeIterator[1][];
resultMappings[0] = resultFieldsItrMapping;
return cartesian(new SelectResults[] { intermediateResults }, resultMappings, Collections.emptyList(), finalList, context, finalVal);
}
}
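The method ends up in one of three branches: with two usable indexes it merge-joins the two index result sets; with one usable index it iterates the intermediate results, evaluates the non-indexed path to get a key, and probes the single usable index; with none it falls back to a cartesian evaluation with the reconstructed condition as an iter operand. The one-index branch is essentially an index-probe join; the following is a self-contained sketch under that reading, with a HashMap standing in for the range index and all names hypothetical.
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

class IndexProbeJoin {
  /** Joins outerRows against indexedRows on keys computed from each side. */
  static <L, R, K> List<Object[]> probeJoin(List<L> outerRows, Function<L, K> outerKey,
                                            List<R> indexedRows, Function<R, K> indexKey) {
    // Build the "index": key -> matching rows (Geode reuses an existing range index instead).
    Map<K, List<R>> index = new HashMap<>();
    for (R row : indexedRows) {
      index.computeIfAbsent(indexKey.apply(row), k -> new ArrayList<>()).add(row);
    }
    List<Object[]> joined = new ArrayList<>();
    for (L outer : outerRows) {
      K key = outerKey.apply(outer);
      if (key == null) {
        continue; // mirrors the skip of UNDEFINED keys in the Geode code
      }
      for (R match : index.getOrDefault(key, Collections.emptyList())) {
        joined.add(new Object[] { outer, match });
      }
    }
    return joined;
  }
}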
use of org.apache.geode.cache.query.internal.index.IndexProtocol in project geode by apache.
the class DerivedInfo method addDerivedResults.
public void addDerivedResults(IndexInfo indexInfo, SelectResults sr) {
IndexProtocol index = indexInfo._index;
String key = QueryUtils.getCompiledIdFromPath(indexInfo._path).getId() + ":" + index.getCanonicalizedIteratorDefinitions()[0];
// String key = index.getCanonicalizedIteratorDefinitions()[0];
if (derivedResults.containsKey(key)) {
derivedResults.get(key).addAll(sr);
} else {
derivedResults.put(key, sr);
}
newDerivatives.add(new Object[] { QueryUtils.getCompiledIdFromPath(indexInfo._path).getId(), sr });
successfulOps.add(currentOp);
}
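addDerivedResults keys its map by the compiled id of the path plus the index's first canonicalized iterator definition, and appends to the existing SelectResults when that key has already been seen. The same put-or-merge shape can be expressed with Map.computeIfAbsent; a minimal sketch with hypothetical names and plain Lists in place of SelectResults:
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class DerivedResultsSketch {
  private final Map<String, List<Object>> derivedResults = new HashMap<>();

  void addDerivedResults(String pathId, String iteratorDefinition, List<Object> results) {
    // Key mirrors the Geode code: "<compiled id>:<first canonicalized iterator definition>".
    String key = pathId + ":" + iteratorDefinition;
    derivedResults.computeIfAbsent(key, k -> new ArrayList<>()).addAll(results);
  }
}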