Use of org.apache.lucene.search.WildcardQuery in project intellij-community by JetBrains.
The class MavenIndicesTest, method testSearchingAfterArtifactAddition:

public void testSearchingAfterArtifactAddition() throws Exception {
    MavenIndex i = myIndices.add("id", myRepositoryHelper.getTestDataPath("local1"), MavenIndex.Kind.LOCAL);
    myIndices.updateOrRepair(i, true, getMavenGeneralSettings(), EMPTY_MAVEN_PROCESS);
    i.addArtifact(new File(myRepositoryHelper.getTestDataPath("local2/jmock/jmock/1.0.0/jmock-1.0.0.jar")));
    assertSearchResults(i, new WildcardQuery(new Term(MavenServerIndexer.SEARCH_TERM_COORDINATES, "*jmock*")), "jmock:jmock:1.0.0");
}

Use of org.apache.lucene.search.WildcardQuery in project intellij-community by JetBrains.
The class MavenIndicesTest, method testSearchingForClassesAfterArtifactAddition:

public void testSearchingForClassesAfterArtifactAddition() throws Exception {
    MavenIndex i = myIndices.add("id", myRepositoryHelper.getTestDataPath("local1"), MavenIndex.Kind.LOCAL);
    myIndices.updateOrRepair(i, true, getMavenGeneralSettings(), EMPTY_MAVEN_PROCESS);
    i.addArtifact(new File(myRepositoryHelper.getTestDataPath("local2/jmock/jmock/1.0.0/jmock-1.0.0.jar")));
    assertSearchResults(i, new WildcardQuery(new Term(MavenServerIndexer.SEARCH_TERM_CLASS_NAMES, "*mock*")), "jmock:jmock:1.0.0");
}

Use of org.apache.lucene.search.WildcardQuery in project intellij-community by JetBrains.
The class MavenIndicesTest, method testRestartingIndicesManagerOnRemoteMavenServerShutdown:

public void testRestartingIndicesManagerOnRemoteMavenServerShutdown() throws Exception {
    MavenIndex i = myIndices.add("id", myRepositoryHelper.getTestDataPath("local1"), MavenIndex.Kind.LOCAL);
    myIndices.updateOrRepair(i, true, getMavenGeneralSettings(), EMPTY_MAVEN_PROCESS);
    assertSearchResults(i, new WildcardQuery(new Term(MavenServerIndexer.SEARCH_TERM_COORDINATES, "*junit*")), "junit:junit:3.8.1", "junit:junit:3.8.2", "junit:junit:4.0");
    MavenServerManager.getInstance().shutdown(true);
    assertSearchResults(i, new WildcardQuery(new Term(MavenServerIndexer.SEARCH_TERM_COORDINATES, "*junit*")), "junit:junit:3.8.1", "junit:junit:3.8.2", "junit:junit:4.0");
}

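These tests run against IntelliJ's Maven index infrastructure, but the wildcard search itself is plain Lucene: a term pattern with leading and trailing "*" matches the pattern anywhere in the indexed value. Below is a minimal, self-contained sketch of the same kind of lookup, assuming Lucene 8.x or later; the class name WildcardSearchSketch, the field name "coordinates", and the stored value format are illustrative only and not the actual document layout written by MavenServerIndexer:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class WildcardSearchSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new ByteBuffersDirectory();
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            // "coordinates" is a stand-in field; StringField keeps the value as a single untokenized term.
            Document doc = new Document();
            doc.add(new StringField("coordinates", "jmock:jmock:1.0.0", Field.Store.YES));
            writer.addDocument(doc);
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            // Leading and trailing '*' let the pattern match anywhere in the indexed term.
            WildcardQuery query = new WildcardQuery(new Term("coordinates", "*jmock*"));
            for (ScoreDoc sd : searcher.search(query, 10).scoreDocs) {
                System.out.println(searcher.doc(sd.doc).get("coordinates")); // jmock:jmock:1.0.0
            }
        }
    }
}
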
Use of org.apache.lucene.search.WildcardQuery in project querydsl by querydsl.
The class LuceneSerializer, method startsWith:

protected Query startsWith(QueryMetadata metadata, Operation<?> operation, boolean ignoreCase) {
    verifyArguments(operation);
    Path<?> path = getPath(operation.getArg(0));
    String field = toField(path);
    String[] terms = convertEscaped(path, operation.getArg(1), metadata);
    if (terms.length > 1) {
        BooleanQuery bq = new BooleanQuery();
        for (int i = 0; i < terms.length; ++i) {
            String s = i == 0 ? terms[i] + "*" : "*" + terms[i] + "*";
            bq.add(new WildcardQuery(new Term(field, s)), Occur.MUST);
        }
        return bq;
    }
    return new PrefixQuery(new Term(field, terms[0]));
}

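For a sense of the queries this serializer emits, here is a small standalone sketch of the same branching. It is illustrative only: the class and method names are made up, the input terms are assumed to be already split and escaped (the job convertEscaped does above), and it uses the BooleanQuery.Builder API of current Lucene releases instead of the older mutable new BooleanQuery() constructor seen in the original:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.WildcardQuery;

public class StartsWithSketch {

    // One term: a plain PrefixQuery. Several terms: the first term is anchored
    // at the start of the field ("term*"), the rest may occur anywhere ("*term*"),
    // and every clause is required (Occur.MUST).
    static Query startsWith(String field, String... terms) {
        if (terms.length == 1) {
            return new PrefixQuery(new Term(field, terms[0]));
        }
        BooleanQuery.Builder bq = new BooleanQuery.Builder();
        for (int i = 0; i < terms.length; i++) {
            String s = i == 0 ? terms[i] + "*" : "*" + terms[i] + "*";
            bq.add(new WildcardQuery(new Term(field, s)), Occur.MUST);
        }
        return bq.build();
    }

    public static void main(String[] args) {
        System.out.println(startsWith("title", "java"));           // title:java*
        System.out.println(startsWith("title", "java", "lucene")); // +title:java* +title:*lucene*
    }
}
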
Use of org.apache.lucene.search.WildcardQuery in project jackrabbit-oak by Apache.
The class LuceneIndex, method addNonFullTextConstraints:

private static void addNonFullTextConstraints(List<Query> qs, Filter filter, IndexReader reader, Analyzer analyzer, IndexDefinition indexDefinition) {
    if (!filter.matchesAllTypes()) {
        addNodeTypeConstraints(qs, filter);
    }
    String path = filter.getPath();
    switch (filter.getPathRestriction()) {
        case ALL_CHILDREN:
            if (USE_PATH_RESTRICTION) {
                if ("/".equals(path)) {
                    break;
                }
                if (!path.endsWith("/")) {
                    path += "/";
                }
                qs.add(new PrefixQuery(newPathTerm(path)));
            }
            break;
        case DIRECT_CHILDREN:
            if (USE_PATH_RESTRICTION) {
                if (!path.endsWith("/")) {
                    path += "/";
                }
                qs.add(new PrefixQuery(newPathTerm(path)));
            }
            break;
        case EXACT:
            qs.add(new TermQuery(newPathTerm(path)));
            break;
        case PARENT:
            if (denotesRoot(path)) {
                // there's no parent of the root node
                // we add a path that can not possibly occur because there
                // is no way to say "match no documents" in Lucene
                qs.add(new TermQuery(new Term(FieldNames.PATH, "///")));
            } else {
                qs.add(new TermQuery(newPathTerm(getParentPath(path))));
            }
            break;
        case NO_RESTRICTION:
            break;
    }
    // Fulltext index definition used by LuceneIndex only works with old format
    // which is not nodeType based. So just use the nt:base index
    IndexingRule rule = indexDefinition.getApplicableIndexingRule(JcrConstants.NT_BASE);
    for (PropertyRestriction pr : filter.getPropertyRestrictions()) {
        if (pr.first == null && pr.last == null) {
            // unbounded restrictions (property existence checks) are not
            // handled here; skip such queries (OAK-1208)
            continue;
        }
        // check excluded properties and types
        if (isExcludedProperty(pr, rule)) {
            continue;
        }
        String name = pr.propertyName;
        if (QueryImpl.REP_EXCERPT.equals(name) || QueryImpl.OAK_SCORE_EXPLANATION.equals(name) || QueryImpl.REP_FACET.equals(name)) {
            continue;
        }
        if (JCR_PRIMARYTYPE.equals(name)) {
            continue;
        }
        if (QueryConstants.RESTRICTION_LOCAL_NAME.equals(name)) {
            continue;
        }
        if (skipTokenization(name)) {
            qs.add(new TermQuery(new Term(name, pr.first.getValue(STRING))));
            continue;
        }
        String first = null;
        String last = null;
        boolean isLike = pr.isLike;
        // TODO what to do with escaped tokens?
        if (pr.first != null) {
            first = pr.first.getValue(STRING);
            first = first.replace("\\", "");
        }
        if (pr.last != null) {
            last = pr.last.getValue(STRING);
            last = last.replace("\\", "");
        }
        if (isLike) {
            first = first.replace('%', WildcardQuery.WILDCARD_STRING);
            first = first.replace('_', WildcardQuery.WILDCARD_CHAR);
            int indexOfWS = first.indexOf(WildcardQuery.WILDCARD_STRING);
            int indexOfWC = first.indexOf(WildcardQuery.WILDCARD_CHAR);
            int len = first.length();
            if (indexOfWS == len - 1 || indexOfWC == len - 1) {
                // remove trailing "*" for prefixquery
                first = first.substring(0, first.length() - 1);
                if (JCR_PATH.equals(name)) {
                    qs.add(new PrefixQuery(newPathTerm(first)));
                } else {
                    qs.add(new PrefixQuery(new Term(name, first)));
                }
            } else {
                if (JCR_PATH.equals(name)) {
                    qs.add(new WildcardQuery(newPathTerm(first)));
                } else {
                    qs.add(new WildcardQuery(new Term(name, first)));
                }
            }
            continue;
        }
        if (first != null && first.equals(last) && pr.firstIncluding && pr.lastIncluding) {
            if (JCR_PATH.equals(name)) {
                qs.add(new TermQuery(newPathTerm(first)));
            } else {
                if ("*".equals(name)) {
                    addReferenceConstraint(first, qs, reader);
                } else {
                    for (String t : tokenize(first, analyzer)) {
                        qs.add(new TermQuery(new Term(name, t)));
                    }
                }
            }
            continue;
        }
        first = tokenizeAndPoll(first, analyzer);
        last = tokenizeAndPoll(last, analyzer);
        qs.add(TermRangeQuery.newStringRange(name, first, last, pr.firstIncluding, pr.lastIncluding));
    }
}

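The isLike branch above is the core of the LIKE-to-Lucene translation: '%' is rewritten to WildcardQuery.WILDCARD_STRING ('*'), '_' to WildcardQuery.WILDCARD_CHAR ('?'), and when the first wildcard of the rewritten pattern is its final character, that trailing wildcard is dropped in favor of a cheaper PrefixQuery. Below is a minimal standalone sketch of just that translation; the helper name likeToQuery is hypothetical, and the JCR path handling (newPathTerm) is omitted:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.WildcardQuery;

public class LikeToLuceneSketch {

    // Mirrors the isLike branch above: '%' -> '*', '_' -> '?'; a pattern whose
    // first wildcard is the final character loses that wildcard and becomes a
    // PrefixQuery, everything else becomes a WildcardQuery.
    static Query likeToQuery(String field, String likePattern) {
        String value = likePattern
                .replace('%', WildcardQuery.WILDCARD_STRING)
                .replace('_', WildcardQuery.WILDCARD_CHAR);
        int indexOfWS = value.indexOf(WildcardQuery.WILDCARD_STRING);
        int indexOfWC = value.indexOf(WildcardQuery.WILDCARD_CHAR);
        int len = value.length();
        if (indexOfWS == len - 1 || indexOfWC == len - 1) {
            return new PrefixQuery(new Term(field, value.substring(0, len - 1)));
        }
        return new WildcardQuery(new Term(field, value));
    }

    public static void main(String[] args) {
        System.out.println(likeToQuery("title", "lucene%"));   // title:lucene*  (PrefixQuery)
        System.out.println(likeToQuery("title", "%inde_ing")); // title:*inde?ing (WildcardQuery)
    }
}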