Example usage of org.apache.lucene.queryParser.ParseException in the Apache Jackrabbit project: the visit method of the LuceneQueryBuilder class.
/**
 * Translates a full-text search query node into a Lucene {@link Query}.
 * <p>
 * The relative path on the node determines both the Lucene field that is
 * searched (node-level fulltext vs. a per-property fulltext field) and any
 * child-axis steps that must wrap the parsed query. Namespace resolution or
 * query parsing failures are collected in {@code exceptions} and signalled
 * to the caller by a {@code null} return value.
 *
 * @param node the textsearch query node to translate.
 * @param data opaque visitor data (unused here).
 * @return the Lucene query for this node, or {@code null} on error.
 */
public Object visit(TextsearchQueryNode node, Object data) {
    try {
        Path relPath = node.getRelativePath();
        String fieldname;
        if (relPath == null || !node.getReferencesProperty()) {
            // fulltext on node
            fieldname = FieldNames.FULLTEXT;
        } else {
            // final path element is a property name: build the per-property
            // fulltext field name as "<prefix>:<FULLTEXT_PREFIX><localName>"
            Name propName = relPath.getName();
            StringBuffer tmp = new StringBuffer();
            tmp.append(nsMappings.getPrefix(propName.getNamespaceURI()));
            tmp.append(":").append(FieldNames.FULLTEXT_PREFIX);
            tmp.append(propName.getLocalName());
            fieldname = tmp.toString();
        }
        QueryParser parser = new JackrabbitQueryParser(fieldname, analyzer, synonymProvider, cache);
        Query context = parser.parse(node.getQuery());
        if (relPath != null && (!node.getReferencesProperty() || relPath.getLength() > 1)) {
            // text search on some child axis: walk the path elements from the
            // last to the first, wrapping the query in axis steps as we go
            Path.Element[] elements = relPath.getElements();
            for (int i = elements.length - 1; i >= 0; i--) {
                // a '*' name test matches any name and is represented as null
                Name name = null;
                if (!elements[i].getName().equals(RelationQueryNode.STAR_NAME_TEST)) {
                    name = elements[i].getName();
                }
                // The deepest *node* element (elements.length - 2 when the last
                // element is a property name, elements.length - 1 otherwise)
                // constrains the node name directly via a NameQuery AND.
                // if path references node that's elements.length - 1
                if (name != null && ((node.getReferencesProperty() && i == elements.length - 2) || (!node.getReferencesProperty() && i == elements.length - 1))) {
                    Query q = new NameQuery(name, indexFormatVersion, nsMappings);
                    BooleanQuery and = new BooleanQuery();
                    and.add(q, Occur.MUST);
                    and.add(context, Occur.MUST);
                    context = and;
                } else if ((node.getReferencesProperty() && i < elements.length - 2) || (!node.getReferencesProperty() && i < elements.length - 1)) {
                    // otherwise do a parent axis step
                    context = new ParentAxisQuery(context, name, indexFormatVersion, nsMappings);
                }
            }
            // finally select parent
            context = new ParentAxisQuery(context, null, indexFormatVersion, nsMappings);
        }
        return context;
    } catch (NamespaceException e) {
        // unknown namespace URI in the property name; report via exceptions list
        exceptions.add(e);
    } catch (ParseException e) {
        // invalid fulltext search expression; report via exceptions list
        exceptions.add(e);
    }
    return null;
}
Example usage of org.apache.lucene.queryParser.ParseException in the DDF project by Codice: the matches method of the ContextualPredicate class.
/**
 * Determines whether this predicate's contextual search criteria match the
 * metadata carried by the given event.
 * <p>
 * If the predicate has explicit text paths, the evaluation criteria are
 * built from those paths and the event's raw metadata; otherwise the
 * pre-built default Lucene index supplied with the event is used.
 *
 * @param properties the event whose contextual metadata is evaluated.
 * @return {@code true} if the event matches (or is a DELETE that cannot be
 *         filtered contextually and is passed through), {@code false} on a
 *         non-match or on any evaluation failure.
 */
@Override
public boolean matches(Event properties) {
    String methodName = "matches";
    LOGGER.debug("ENTERING: {}", methodName);
    LOGGER.debug("Headers: {}", properties);
    ContextualEvaluationCriteria cec = null;
    Map<String, Object> contextualMap = (Map<String, Object>) properties.getProperty(PubSubConstants.HEADER_CONTEXTUAL_KEY);
    if (contextualMap == null) {
        LOGGER.debug("No contextual metadata to search against.");
        return false;
    }
    String operation = (String) properties.getProperty(PubSubConstants.HEADER_OPERATION_KEY);
    LOGGER.debug("operation = {}", operation);
    String metadata = (String) contextualMap.get("METADATA");
    LOGGER.debug("metadata = [{}]", metadata);
    // A DELETE whose metadata is just the deletion marker carries nothing to
    // filter on - send the event straight on to the subscriber.
    // Constant-first equals() avoids the NullPointerException the previous
    // operation.equals(...) / metadata.equals(...) calls could throw when
    // either header is absent.
    if (PubSubConstants.DELETE.equals(operation) && PubSubConstants.METADATA_DELETED.equals(metadata)) {
        LOGGER.debug("Detected a DELETE operation where metadata is just the word 'deleted', so send event on to subscriber");
        return true;
    }
    if (this.textPaths != null && !this.textPaths.isEmpty()) {
        // Explicit text paths: evaluate against the event's metadata document.
        LOGGER.debug("creating criteria with textPaths and metadata document");
        try {
            cec = new ContextualEvaluationCriteriaImpl(searchPhrase, fuzzy, caseSensitiveSearch, this.textPaths.toArray(new String[this.textPaths.size()]), metadata);
        } catch (IOException e) {
            LOGGER.debug("IO exception during context evaluation", e);
            return false;
        }
    } else {
        // No text paths specified: use the default Lucene search index, which
        // indexed the entry's entire metadata per the default XPath
        // expressions in ContextualEvaluator, from the event's properties.
        LOGGER.debug("using default Lucene search index for metadata");
        cec = new ContextualEvaluationCriteriaImpl(searchPhrase, fuzzy, caseSensitiveSearch, (Directory) contextualMap.get("DEFAULT_INDEX"));
    }
    try {
        return ContextualEvaluator.evaluate(cec);
    } catch (IOException e) {
        LOGGER.debug("IO Exception evaluating context criteria", e);
    } catch (ParseException e) {
        LOGGER.debug("Parse Exception evaluating context criteria", e);
    }
    LOGGER.debug("EXITING: {}", methodName);
    return false;
}
Example usage of org.apache.lucene.queryParser.ParseException in the alfresco-repository project by Alfresco: the executeQuery method of the LuceneQueryEngine class.
/**
 * Executes an abstract query against the Lucene index for the first store in
 * the query options.
 * <p>
 * The query must be a {@link LuceneQueryBuilder} and the searcher a
 * {@link LuceneSearcher}; anything else is unsupported. Sorting is applied
 * either via Lucene itself or as an in-memory post-sort, depending on the
 * result size thresholds in the search parameters.
 *
 * @param query the query to execute; must implement LuceneQueryBuilder.
 * @param options paging, locale, consistency and permission options.
 * @param functionContext context used to resolve selector groups and
 *        function evaluation during query building.
 * @return the results keyed by selector group.
 * @throws SearcherException if the query cannot be parsed or an I/O error
 *         occurs during the search.
 */
public QueryEngineResults executeQuery(Query query, QueryOptions options, FunctionEvaluationContext functionContext) {
    Set<String> selectorGroup = null;
    if (query.getSource() != null) {
        List<Set<String>> selectorGroups = query.getSource().getSelectorGroups(functionContext);
        if (selectorGroups.isEmpty()) {
            throw new UnsupportedOperationException("No selectors");
        }
        if (selectorGroups.size() > 1) {
            throw new UnsupportedOperationException("Advanced join is not supported");
        }
        selectorGroup = selectorGroups.get(0);
    }
    // Copy the relevant query options onto Lucene search parameters.
    SearchParameters searchParameters = new SearchParameters();
    if (options.getLocales().size() > 0) {
        for (Locale locale : options.getLocales()) {
            searchParameters.addLocale(locale);
        }
    }
    searchParameters.excludeDataInTheCurrentTransaction(!options.isIncludeInTransactionData());
    searchParameters.setSkipCount(options.getSkipCount());
    searchParameters.setMaxPermissionChecks(options.getMaxPermissionChecks());
    searchParameters.setMaxPermissionCheckTimeMillis(options.getMaxPermissionCheckTimeMillis());
    searchParameters.setDefaultFieldName(options.getDefaultFieldName());
    searchParameters.setMlAnalaysisMode(options.getMlAnalaysisMode());
    if (options.getMaxItems() >= 0) {
        searchParameters.setLimitBy(LimitBy.FINAL_SIZE);
        searchParameters.setLimit(options.getMaxItems());
        searchParameters.setMaxItems(options.getMaxItems());
    } else {
        searchParameters.setLimitBy(LimitBy.UNLIMITED);
    }
    searchParameters.setUseInMemorySort(options.getUseInMemorySort());
    searchParameters.setMaxRawResultSetSizeForInMemorySort(options.getMaxRawResultSetSizeForInMemorySort());
    searchParameters.setBulkFetchEnabled(options.isBulkFetchEnabled());
    searchParameters.setQueryConsistency(options.getQueryConsistency());
    try {
        StoreRef storeRef = options.getStores().get(0);
        searchParameters.addStore(storeRef);
        if (query instanceof LuceneQueryBuilder) {
            SearchService searchService = indexAndSearcher.getSearcher(storeRef, options.isIncludeInTransactionData());
            if (searchService instanceof LuceneSearcher) {
                LuceneSearcher luceneSearcher = (LuceneSearcher) searchService;
                ClosingIndexSearcher searcher = luceneSearcher.getClosingIndexSearcher();
                LuceneQueryBuilderContext<org.apache.lucene.search.Query, Sort, ParseException> luceneContext = new LuceneQueryBuilderContextImpl(dictionaryService, namespaceService, tenantService, searchParameters, indexAndSearcher.getDefaultMLSearchAnalysisMode(), searcher.getIndexReader());
                @SuppressWarnings("unchecked") LuceneQueryBuilder<org.apache.lucene.search.Query, Sort, ParseException> builder = (LuceneQueryBuilder<org.apache.lucene.search.Query, Sort, ParseException>) query;
                org.apache.lucene.search.Query luceneQuery = builder.buildQuery(selectorGroup, luceneContext, functionContext);
                if (logger.isDebugEnabled()) {
                    logger.debug("Executing lucene query: " + luceneQuery);
                }
                Sort sort = builder.buildSort(selectorGroup, luceneContext, functionContext);
                Hits hits = searcher.search(luceneQuery);
                // Decide between Lucene-side sorting and in-memory post-sort
                // based on the unsorted hit count and configured thresholds.
                boolean postSort = false;
                if (sort != null) {
                    postSort = searchParameters.usePostSort(hits.length(), useInMemorySort, maxRawResultSetSizeForInMemorySort);
                    if (!postSort) {
                        hits = searcher.search(luceneQuery, sort);
                    }
                }
                ResultSet answer;
                ResultSet result = new LuceneResultSet(hits, searcher, nodeService, tenantService, searchParameters, indexAndSearcher);
                if (postSort) {
                    if (sort != null) {
                        for (SortField sf : sort.getSort()) {
                            // SortField "reverse" is the inverse of ascending
                            searchParameters.addSort(sf.getField(), !sf.getReverse());
                        }
                    }
                    ResultSet sorted = new SortedResultSet(result, nodeService, builder.buildSortDefinitions(selectorGroup, luceneContext, functionContext), namespaceService, dictionaryService, searchParameters.getSortLocale());
                    answer = sorted;
                } else {
                    answer = result;
                }
                ResultSet rs = new PagingLuceneResultSet(answer, searchParameters, nodeService);
                Map<Set<String>, ResultSet> map = new HashMap<Set<String>, ResultSet>(1);
                map.put(selectorGroup, rs);
                return new QueryEngineResults(map);
            } else {
                throw new UnsupportedOperationException();
            }
        } else {
            throw new UnsupportedOperationException();
        }
    } catch (ParseException e) {
        // Chain the cause (as the IOException branch below already does) so
        // the original parse stack trace is not lost.
        throw new SearcherException("Failed to parse query: " + e.getMessage(), e);
    } catch (IOException e) {
        throw new SearcherException("IO exception during search", e);
    }
}
Example usage of org.apache.lucene.queryParser.ParseException in the Openfire project by Ignite Realtime: the luceneSearch method of the ArchiveSearcher class.
/**
 * Searches the Lucene index for all archived conversations using the specified search.
 * <p>
 * Builds a text query from the search's query string, optionally adds
 * date-range filtering, external-conversation matching and participant
 * restrictions, then pages the hits according to the search's start index
 * and result count. Parse and I/O failures are logged and yield an empty
 * collection.
 *
 * @param search the search.
 * @return the collection of conversations that match the search.
 */
private Collection<Conversation> luceneSearch(ArchiveSearch search) {
    try {
        IndexSearcher searcher = archiveIndexer.getSearcher();
        final StandardAnalyzer analyzer = new StandardAnalyzer();
        // Create the query based on the search terms.
        Query query = new QueryParser("text", analyzer).parse(search.getQueryString());
        // See if the user wants to sort on something other than relevance. If so, we need
        // to tell Lucene to do sorting. Default to a null sort so that it has no
        // effect if sorting hasn't been selected.
        Sort sort = null;
        if (search.getSortField() != ArchiveSearch.SortField.relevance) {
            if (search.getSortField() == ArchiveSearch.SortField.date) {
                sort = new Sort("date", search.getSortOrder() == ArchiveSearch.SortOrder.descending);
            }
        }
        // See if we need to filter on date. Default to a null filter so that it has
        // no effect if date filtering hasn't been selected.
        Filter filter = null;
        if (search.getDateRangeMin() != null || search.getDateRangeMax() != null) {
            // Dates are stored with day resolution, so compare at that granularity.
            String min = null;
            if (search.getDateRangeMin() != null) {
                min = DateTools.dateToString(search.getDateRangeMin(), DateTools.Resolution.DAY);
            }
            String max = null;
            if (search.getDateRangeMax() != null) {
                max = DateTools.dateToString(search.getDateRangeMax(), DateTools.Resolution.DAY);
            }
            // ENT-271: don't include upper or lower bound if these elements are null
            filter = new RangeFilter("date", min, max, min != null, max != null);
        }
        // See if we need to match external conversations. This will only be true
        // when less than two conversation participants are specified and external
        // wildcard matching is enabled.
        Collection<JID> participants = search.getParticipants();
        if (search.getParticipants().size() < 2 && search.isExternalWildcardMode()) {
            TermQuery externalQuery = new TermQuery(new Term("external", "true"));
            // Add this query to the existing query.
            BooleanQuery booleanQuery = new BooleanQuery();
            booleanQuery.add(query, BooleanClause.Occur.MUST);
            booleanQuery.add(externalQuery, BooleanClause.Occur.MUST);
            query = booleanQuery;
        }
        // See if we need to restrict the search to certain users.
        if (!participants.isEmpty()) {
            if (participants.size() == 1) {
                String jid = participants.iterator().next().toBareJID();
                Query participantQuery = new QueryParser("jid", analyzer).parse(jid);
                // Add this query to the existing query.
                BooleanQuery booleanQuery = new BooleanQuery();
                booleanQuery.add(query, BooleanClause.Occur.MUST);
                booleanQuery.add(participantQuery, BooleanClause.Occur.MUST);
                query = booleanQuery;
            } else // Otherwise there are two participants.
            {
                // Both participants must appear in the conversation.
                Iterator<JID> iter = participants.iterator();
                String participant1 = iter.next().toBareJID();
                String participant2 = iter.next().toBareJID();
                BooleanQuery participantQuery = new BooleanQuery();
                participantQuery.add(new QueryParser("jid", analyzer).parse(participant1), BooleanClause.Occur.MUST);
                participantQuery.add(new QueryParser("jid", analyzer).parse(participant2), BooleanClause.Occur.MUST);
                // Add this query to the existing query.
                BooleanQuery booleanQuery = new BooleanQuery();
                booleanQuery.add(query, BooleanClause.Occur.MUST);
                booleanQuery.add(participantQuery, BooleanClause.Occur.MUST);
                query = booleanQuery;
            }
        }
        Hits hits = searcher.search(query, filter, sort);
        int startIndex = search.getStartIndex();
        int endIndex = startIndex + search.getNumResults() - 1;
        // The end index can't be after the end of the results.
        if (endIndex > hits.length() - 1) {
            // endIndex = hits.length() - 1;
            // TODO: We need to determine if this is necessary.
            // NOTE(review): the clamp is intentionally left disabled by the
            // original author - presumably LuceneQueryResults tolerates an
            // endIndex beyond hits.length(); verify before enabling.
        }
        // If the start index is positioned after the end, return an empty list.
        if (((endIndex - startIndex) + 1) <= 0) {
            return Collections.emptyList();
        } else // Otherwise return the results.
        {
            return new LuceneQueryResults(hits, startIndex, endIndex);
        }
    } catch (ParseException pe) {
        // Malformed query string: log and return no results.
        Log.error(pe.getMessage(), pe);
        return Collections.emptySet();
    } catch (IOException ioe) {
        // Index access failure: log and return no results.
        Log.error(ioe.getMessage(), ioe);
        return Collections.emptySet();
    }
}
Example usage of org.apache.lucene.queryParser.ParseException in the Apache Jackrabbit project: the getFullTextSearchQuery method of the LuceneQueryFactory class.
/**
 * Builds the Lucene query for a QOM full-text search constraint.
 * <p>
 * When the constraint names a property, the search is scoped to that
 * property's fulltext field ("prefix:FULLTEXT_PREFIX + localName");
 * otherwise the node-level fulltext field is used.
 *
 * @param fts the full-text search constraint to translate.
 * @return the parsed Lucene query.
 * @throws RepositoryException if the property namespace cannot be resolved
 *         or the search expression is not a valid fulltext query.
 */
protected Query getFullTextSearchQuery(FullTextSearch fts) throws RepositoryException {
    // Resolve which Lucene field to search.
    String searchField;
    String propertyName = fts.getPropertyName();
    if (propertyName == null) {
        searchField = FieldNames.FULLTEXT;
    } else {
        Name qName = session.getQName(propertyName);
        searchField = nsMappings.getPrefix(qName.getNamespaceURI())
                + ":" + FieldNames.FULLTEXT_PREFIX + qName.getLocalName();
    }
    StaticOperand operand = fts.getFullTextSearchExpression();
    String queryText = evaluator.getValue(operand).getString();
    try {
        QueryParser queryParser = new JackrabbitQueryParser(searchField, index.getTextAnalyzer(), index.getSynonymProvider(), cache);
        return queryParser.parse(queryText);
    } catch (ParseException e) {
        throw new RepositoryException("Invalid full text search expression: " + queryText, e);
    }
}
End of aggregated usage examples.