Use of org.openrdf.query.algebra.QueryModelNode in project incubator-rya by apache.
From the class PCJOptimizerTest, the method testSegmentWithUnionAndFilters:
@Test
public void testSegmentWithUnionAndFilters() throws Exception {
    final String query1 = //
            "" + //
            "SELECT ?e ?c ?l" + //
            "{" + //
            " Filter(?e = <uri:s1>) " + //
            " Filter(?c = <uri:s2>) " + //
            " {?e <uri:p1> <uri:o1>. } UNION { ?e a ?c. OPTIONAL {?e <uri:talksTo> ?l}. ?e <uri:p5> <uri:o4>. ?e <uri:p4> <uri:o3> } . " + //
            " ?e <uri:p2> ?c . " + //
            " ?e <uri:p3> <uri:o2> . " + //
            "}";
    final String query2 = //
            "" + //
            "SELECT ?a ?b ?m" + //
            "{" + //
            " Filter(?b = <uri:s2>) " + //
            " ?a <uri:p5> <uri:o4> ." + //
            " ?a <uri:p4> <uri:o3> ." + //
            " OPTIONAL {?a <uri:talksTo> ?m} . " + //
            " ?a a ?b . " + //
            "}";
    final String query3 = //
            "" + //
            "SELECT ?h ?i" + //
            "{" + //
            " Filter(?h = <uri:s1>) " + //
            " ?h <uri:p2> ?i . " + //
            " ?h <uri:p3> <uri:o2> . " + //
            "}";

    // Parse the three queries into tuple expressions.
    final SPARQLParser parser = new SPARQLParser();
    final ParsedQuery pq1 = parser.parseQuery(query1, null);
    final ParsedQuery pq2 = parser.parseQuery(query2, null);
    final ParsedQuery pq3 = parser.parseQuery(query3, null);
    final TupleExpr te1 = pq1.getTupleExpr();
    final TupleExpr te2 = pq2.getTupleExpr();
    final TupleExpr te3 = pq3.getTupleExpr();
    final TupleExpr unOpt = te1.clone();

    // The node at index 0 of the unoptimized plan is expected to remain unmatched by any PCJ.
    final List<QueryModelNode> remainingNodes = getNodes(te1);
    final Set<QueryModelNode> unMatchedNodes = new HashSet<>();
    unMatchedNodes.add(remainingNodes.get(0));

    // Register query2 and query3 as precomputed joins (PCJs).
    final SimpleExternalTupleSet pcj1 = new SimpleExternalTupleSet((Projection) te2);
    final SimpleExternalTupleSet pcj2 = new SimpleExternalTupleSet((Projection) te3);
    final List<ExternalTupleSet> externalList = new ArrayList<>();
    externalList.add(pcj1);
    externalList.add(pcj2);
    provider.setIndices(externalList);

    // Optimize query1 and verify that the PCJs were substituted into the plan,
    // leaving only the expected unmatched nodes.
    final PCJOptimizer optimizer = new PCJOptimizer(externalList, false, provider);
    optimizer.optimize(te1, null, null);
    Assert.assertEquals(true, validatePcj(te1, unOpt, externalList, unMatchedNodes));
}
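The helpers getNodes and validatePcj used above belong to the test class and are not shown in this snippet. As a rough, hypothetical sketch (an assumption, not the project's actual implementation), a node-collecting helper like getNodes could walk the tuple expression with a QueryModelVisitorBase and gather its StatementPattern and Filter nodes in visit order:

// Hypothetical sketch only: the real getNodes in PCJOptimizerTest may collect a
// different set of node types or return them in a different order.
private static List<QueryModelNode> getNodes(TupleExpr te) {
    final List<QueryModelNode> nodes = new ArrayList<>();
    te.visit(new QueryModelVisitorBase<RuntimeException>() {
        @Override
        public void meet(StatementPattern node) {
            nodes.add(node);
        }

        @Override
        public void meet(Filter node) {
            nodes.add(node);
            // Continue into the filter's argument so nested patterns are collected too.
            node.getArg().visit(this);
        }
    });
    return nodes;
}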
Use of org.openrdf.query.algebra.QueryModelNode in project incubator-rya by apache.
From the class PCJOptimizerTest, the method testSegmentWithLargeUnion:
@Test
public void testSegmentWithLargeUnion() throws Exception {
    final String query1 = //
            "" + //
            "SELECT ?e ?c ?l" + //
            "{" + //
            " {?e <uri:p1> <uri:o1>. } UNION { " + //
            " ?e <uri:p0> ?l ." + //
            " ?l <uri:p5> <uri:o5> ." + //
            " OPTIONAL{?l <uri:p4> <uri:o4>} ." + //
            " ?c<uri:p1> ?l ." + //
            " OPTIONAL{ ?e <uri:p1> ?c } ." + //
            " ?e <uri:p2> <uri:o2>. " + //
            " ?c <uri:p3> <uri:o3> " + //
            " } . " + //
            " ?e <uri:p2> ?c . " + //
            " ?e <uri:p3> <uri:o2> . " + //
            "}";
    final String query2 = //
            "" + //
            "SELECT ?a ?b ?c " + //
            "{" + //
            " ?a <uri:p2> <uri:o2>. " + //
            " ?b <uri:p3> <uri:o3>. " + //
            " OPTIONAL{ ?a <uri:p1> ?b } ." + //
            " ?a <uri:p0> ?c ." + //
            " ?b<uri:p1> ?c " + //
            "}";
    final String query3 = //
            "" + //
            "SELECT ?h ?i" + //
            "{" + //
            " ?h <uri:p2> ?i . " + //
            " ?h <uri:p3> <uri:o2> . " + //
            "}";

    // Parse the three queries into tuple expressions.
    final SPARQLParser parser = new SPARQLParser();
    final ParsedQuery pq1 = parser.parseQuery(query1, null);
    final ParsedQuery pq2 = parser.parseQuery(query2, null);
    final ParsedQuery pq3 = parser.parseQuery(query3, null);
    final TupleExpr te1 = pq1.getTupleExpr();
    final TupleExpr te2 = pq2.getTupleExpr();
    final TupleExpr te3 = pq3.getTupleExpr();
    final TupleExpr unOpt = te1.clone();

    // The nodes at indices 0, 2, and 3 of the unoptimized plan are expected to
    // remain unmatched by any PCJ.
    final List<QueryModelNode> remainingNodes = getNodes(te1);
    final Set<QueryModelNode> unMatchedNodes = new HashSet<>();
    unMatchedNodes.add(remainingNodes.get(0));
    unMatchedNodes.add(remainingNodes.get(2));
    unMatchedNodes.add(remainingNodes.get(3));

    // Register query2 and query3 as precomputed joins (PCJs).
    final SimpleExternalTupleSet pcj1 = new SimpleExternalTupleSet((Projection) te2);
    final SimpleExternalTupleSet pcj2 = new SimpleExternalTupleSet((Projection) te3);
    final List<ExternalTupleSet> externalList = new ArrayList<>();
    externalList.add(pcj1);
    externalList.add(pcj2);
    provider.setIndices(externalList);

    // Optimize query1 and verify that the PCJs were substituted into the plan,
    // leaving only the expected unmatched nodes.
    final PCJOptimizer optimizer = new PCJOptimizer(externalList, false, provider);
    optimizer.optimize(te1, null, null);
    Assert.assertEquals(true, validatePcj(te1, unOpt, externalList, unMatchedNodes));
}
Use of org.openrdf.query.algebra.QueryModelNode in project incubator-rya by apache.
From the class PCJOptimizerTest, the method testSegmentWithUnion:
@Test
public void testSegmentWithUnion() throws Exception {
    final String query1 = //
            "" + //
            "SELECT ?e ?c ?l" + //
            "{" + //
            " {?e <uri:p1> <uri:o1>. } UNION { ?e a ?c. OPTIONAL {?e <uri:talksTo> ?l}. ?e <uri:p5> <uri:o4>. ?e <uri:p4> <uri:o3> } . " + //
            " ?e <uri:p2> ?c . " + //
            " ?e <uri:p3> <uri:o2> . " + //
            "}";
    final String query2 = //
            "" + //
            "SELECT ?a ?b ?m" + //
            "{" + //
            " ?a <uri:p5> <uri:o4> ." + //
            " ?a <uri:p4> <uri:o3> ." + //
            " OPTIONAL {?a <uri:talksTo> ?m} . " + //
            " ?a a ?b . " + //
            "}";
    final String query3 = //
            "" + //
            "SELECT ?h ?i" + //
            "{" + //
            " ?h <uri:p2> ?i . " + //
            " ?h <uri:p3> <uri:o2> . " + //
            "}";

    // Parse the three queries into tuple expressions.
    final SPARQLParser parser = new SPARQLParser();
    final ParsedQuery pq1 = parser.parseQuery(query1, null);
    final ParsedQuery pq2 = parser.parseQuery(query2, null);
    final ParsedQuery pq3 = parser.parseQuery(query3, null);
    final TupleExpr te1 = pq1.getTupleExpr();
    final TupleExpr te2 = pq2.getTupleExpr();
    final TupleExpr te3 = pq3.getTupleExpr();
    final TupleExpr unOpt = te1.clone();

    // The node at index 0 of the unoptimized plan is expected to remain unmatched by any PCJ.
    final List<QueryModelNode> remainingNodes = getNodes(te1);
    final Set<QueryModelNode> unMatchedNodes = new HashSet<>();
    unMatchedNodes.add(remainingNodes.get(0));

    // Register query2 and query3 as precomputed joins (PCJs).
    final SimpleExternalTupleSet pcj1 = new SimpleExternalTupleSet((Projection) te2);
    final SimpleExternalTupleSet pcj2 = new SimpleExternalTupleSet((Projection) te3);
    final List<ExternalTupleSet> externalList = new ArrayList<>();
    externalList.add(pcj1);
    externalList.add(pcj2);
    provider.setIndices(externalList);

    // Optimize query1 and verify that the PCJs were substituted into the plan,
    // leaving only the expected unmatched nodes.
    final PCJOptimizer optimizer = new PCJOptimizer(externalList, false, provider);
    optimizer.optimize(te1, null, null);
    Assert.assertEquals(true, validatePcj(te1, unOpt, externalList, unMatchedNodes));
}
Use of org.openrdf.query.algebra.QueryModelNode in project incubator-rya by apache.
From the class StatementMetadataExternalSetProvider, the method getExternalSets:
/**
 * This method extracts all {@link StatementMetadataNode}s from the provided {@link QuerySegment}.
 * It looks through the provided QuerySegment for all combinations of {@link StatementPattern}s that
 * represent a reified query and combines each such combination into a StatementMetadataNode. A
 * StatementPattern cannot be used in more than one reified query and StatementMetadataNode.
 */
@Override
public List<StatementMetadataNode<?>> getExternalSets(QuerySegment<StatementMetadataNode<?>> segment) {
    reifiedQueries = HashMultimap.create();
    List<StatementMetadataNode<?>> metadataList = new ArrayList<>();
    // Group the segment's StatementPatterns by their subject Var.
    for (QueryModelNode node : segment.getUnOrderedNodes()) {
        if (node instanceof StatementPattern) {
            StatementPattern sp = (StatementPattern) node;
            reifiedQueries.put(sp.getSubjectVar(), sp);
        }
    }
    // For each subject, filter out patterns whose properties are not valid and, if the
    // remaining patterns contain the required type pattern, bundle them into a StatementMetadataNode.
    for (Var var : reifiedQueries.keySet()) {
        Collection<StatementPattern> patterns = removeInvalidProperties(reifiedQueries.get(var));
        if (StatementMetadataNode.verifyHasCorrectTypePattern(patterns)) {
            metadataList.add(new StatementMetadataNode<>(patterns, conf));
        }
    }
    return metadataList;
}
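For context, the reified queries this provider looks for are groups of StatementPatterns that share a subject variable and together describe a single statement plus its metadata. Below is a hedged illustration of such a query; the exact property IRIs accepted are defined by StatementMetadataNode, not by this sketch, so treat the vocabulary as an assumption:

// Hypothetical example of a reified metadata query. All of the patterns share the
// subject _:b, so getExternalSets would group them under one subject Var and, if
// they pass verifyHasCorrectTypePattern, bundle them into a single StatementMetadataNode.
final String reifiedQuery = //
        "PREFIX owl: <http://www.w3.org/2002/07/owl#> " + //
        "PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> " + //
        "SELECT ?x ?y " + //
        "{" + //
        " _:b rdf:type owl:Annotation . " + //
        " _:b owl:annotatedSource <uri:Joe> . " + //
        " _:b owl:annotatedProperty <uri:worksAt> . " + //
        " _:b owl:annotatedTarget ?x . " + //
        " _:b <uri:createdBy> ?y . " + //
        "}";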
Use of org.openrdf.query.algebra.QueryModelNode in project incubator-rya by apache.
From the class QueryVariableNormalizer, the method getListVarCnt:
/**
 * Given a list of StatementPattern nodes and a TreeMap containing the
 * variables in the tuple, this method counts the number of occurrences of
 * each variable in the given list.
 *
 * @param list
 *            List of StatementPattern nodes
 * @param cnt
 *            TreeMap whose keys are the tuple variables and whose values are initialized to 0
 * @return TreeMap whose keys are the tuple variables and whose values are the
 *         number of times each variable appears in the list
 */
private static TreeMap<String, Integer> getListVarCnt(List<QueryModelNode> list, TreeMap<String, Integer> cnt) {
    int count = 0;
    for (QueryModelNode qNode : list) {
        // Collect every variable name appearing in this node and increment its count.
        // Each variable is expected to already be a key of cnt; otherwise cnt.get(s)
        // returns null and the unboxing below throws a NullPointerException.
        List<String> vars = VarCollector.process(qNode);
        for (String s : vars) {
            count = cnt.get(s);
            count++;
            cnt.put(s, count);
        }
    }
    return cnt;
}
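A minimal usage sketch for getListVarCnt, assuming a caller inside QueryVariableNormalizer (the name tupleNodes below is a placeholder): the map must be seeded with a zero count for every variable that can appear in the list, because getListVarCnt increments existing entries rather than creating them.

// Hypothetical usage sketch; tupleNodes stands for some List<QueryModelNode> of
// StatementPattern nodes taken from the tuple expression being normalized.
TreeMap<String, Integer> cnt = new TreeMap<>();
for (QueryModelNode node : tupleNodes) {
    for (String varName : VarCollector.process(node)) {
        cnt.put(varName, 0); // seed every variable with a zero count
    }
}
TreeMap<String, Integer> occurrences = getListVarCnt(tupleNodes, cnt);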