Use of org.apache.lucene.search.BooleanQuery in project OpenOLAT by OpenOLAT.
The class SearchServiceImpl, method createQuery.
protected BooleanQuery createQuery(String queryString, List<String> condQueries) throws ParseException {
    BooleanQuery query = new BooleanQuery();
    if (StringHelper.containsNonWhitespace(queryString)) {
        String[] fieldsArr = getFieldsToSearchIn();
        QueryParser queryParser = new MultiFieldQueryParser(SearchService.OO_LUCENE_VERSION, fieldsArr, analyzer);
        // some additional fields are not tokenized and not lowercased
        queryParser.setLowercaseExpandedTerms(false);
        Query multiFieldQuery = queryParser.parse(queryString.toLowerCase());
        query.add(multiFieldQuery, Occur.MUST);
    }
    if (condQueries != null && !condQueries.isEmpty()) {
        for (String condQueryString : condQueries) {
            // note: the condition string also serves as the parser's default field name here
            QueryParser condQueryParser = new QueryParser(SearchService.OO_LUCENE_VERSION, condQueryString, analyzer);
            condQueryParser.setLowercaseExpandedTerms(false);
            Query condQuery = condQueryParser.parse(condQueryString);
            query.add(condQuery, Occur.MUST);
        }
    }
    return query;
}
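The snippet above targets the pre-5.0 Lucene API, in which BooleanQuery was mutable and clauses were added to it directly. For comparison, here is a minimal sketch of the same MUST-conjunction on Lucene 5.3 and later, where BooleanQuery is immutable and assembled through BooleanQuery.Builder; the class and helper names below are hypothetical and not part of OpenOLAT.

import java.util.List;

import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;

public class BooleanQueryBuilderExample {

    // Sketch: AND together pre-parsed clauses with the immutable builder API.
    public static BooleanQuery mustMatchAll(List<Query> clauses) {
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        for (Query clause : clauses) {
            builder.add(clause, Occur.MUST); // every clause is required, as in the snippet
        }
        return builder.build();
    }
}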
Use of org.apache.lucene.search.BooleanQuery in project polymap4-core by Polymap4.
The class LuceneRecordStore, method find.
@Override
public ResultSet find(RecordQuery query) throws Exception {
    assert !isClosed() : "Store is closed already.";
    if (query instanceof SimpleQuery) {
        Query luceneQuery = null;
        Collection<QueryExpression> expressions = ((SimpleQuery) query).expressions();
        if (expressions.isEmpty()) {
            // no expressions: match every record
            luceneQuery = new MatchAllDocsQuery();
        } else {
            // one required clause per expression
            luceneQuery = new BooleanQuery();
            for (QueryExpression exp : expressions) {
                ((BooleanQuery) luceneQuery).add(valueCoders.searchQuery(exp), BooleanClause.Occur.MUST);
            }
        }
        return new LuceneRecordQuery(this, luceneQuery)
                .setMaxResults(query.getMaxResults())
                .setFirstResult(query.getFirstResult())
                .sort(query.getSortKey(), query.getSortOrder(), query.getSortType())
                .execute();
    } else {
        // other query types execute themselves
        return query.execute();
    }
}
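The branch structure above encodes a common pattern: an empty expression list means no constraints, so the store returns everything via MatchAllDocsQuery; otherwise each expression becomes a required clause. Here is a minimal sketch of that pattern in isolation, using the same pre-5.0 mutable BooleanQuery the snippet uses; the class and method names are hypothetical.

import java.util.List;

import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;

public class ConjunctionExample {

    // Sketch: no sub-queries means "match all"; otherwise AND them together.
    public static Query conjunctionOf(List<Query> subQueries) {
        if (subQueries.isEmpty()) {
            return new MatchAllDocsQuery();
        }
        BooleanQuery result = new BooleanQuery(); // mutable pre-5.0 API, as above
        for (Query sub : subQueries) {
            result.add(sub, BooleanClause.Occur.MUST);
        }
        return result;
    }
}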
Use of org.apache.lucene.search.BooleanQuery in project SearchServices by Alfresco.
The class SolrInformationServer, method cascadeUpdateV2.
private void cascadeUpdateV2(NodeMetaData parentNodeMetaData, boolean overwrite, SolrQueryRequest request,
        UpdateRequestProcessor processor) throws AuthenticationException, IOException, JSONException {
    RefCounted<SolrIndexSearcher> refCounted = null;
    IntArrayList docList = null;
    HashSet<Long> childIds = new HashSet<>();
    try {
        refCounted = core.getSearcher();
        SolrIndexSearcher searcher = refCounted.get();
        // Collect all documents that list the parent node as an ancestor.
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        TermQuery termQuery = new TermQuery(new Term(FIELD_ANCESTOR, parentNodeMetaData.getNodeRef().toString()));
        BooleanClause booleanClause = new BooleanClause(termQuery, BooleanClause.Occur.MUST);
        builder.add(booleanClause);
        BooleanQuery booleanQuery = builder.build();
        DocListCollector collector = new DocListCollector();
        searcher.search(booleanQuery, collector);
        docList = collector.getDocs();
        int size = docList.size();
        Set<String> fields = new HashSet<>();
        fields.add(FIELD_SOLR4_ID);
        for (int i = 0; i < size; i++) {
            int docId = docList.get(i);
            Document document = searcher.doc(docId, fields);
            IndexableField indexableField = document.getField(FIELD_SOLR4_ID);
            String id = indexableField.stringValue();
            TenantAclIdDbId ids = AlfrescoSolrDataModel.decodeNodeDocumentId(id);
            childIds.add(ids.dbId);
        }
    } finally {
        refCounted.decref();
    }
    for (Long childId : childIds) {
        NodeMetaDataParameters nmdp = new NodeMetaDataParameters();
        nmdp.setFromNodeId(childId);
        nmdp.setToNodeId(childId);
        nmdp.setIncludeAclId(false);
        nmdp.setIncludeAspects(false);
        nmdp.setIncludeChildAssociations(false);
        nmdp.setIncludeChildIds(true);
        nmdp.setIncludeNodeRef(false);
        nmdp.setIncludeOwner(false);
        nmdp.setIncludeParentAssociations(false);
        // We only care about the path and ancestors (which is included) for this case
        nmdp.setIncludePaths(true);
        nmdp.setIncludeProperties(false);
        nmdp.setIncludeType(false);
        nmdp.setIncludeTxnId(true);
        // Gets only one
        List<NodeMetaData> nodeMetaDatas = repositoryClient.getNodesMetaData(nmdp, 1);
        if (!nodeMetaDatas.isEmpty()) {
            NodeMetaData nodeMetaData = nodeMetaDatas.get(0);
            // We do not bring in changes from the future, as nodes may switch shards
            // and we do not want that logic here.
            if (nodeMetaData.getTxnId() < parentNodeMetaData.getTxnId()) {
                long nodeId = nodeMetaData.getId();
                try {
                    if (!spinLock(nodeId, 120000)) {
                        throw new IOException("Unable to acquire spinlock on:" + nodeId);
                    }
                    if (log.isDebugEnabled()) {
                        log.debug("... cascade update child doc " + childId);
                    }
                    // Gets the document that we have from the content store and updates it
                    String fixedTenantDomain = AlfrescoSolrDataModel.getTenantId(nodeMetaData.getTenantDomain());
                    SolrInputDocument cachedDoc = solrContentStore.retrieveDocFromSolrContentStore(fixedTenantDomain, nodeMetaData.getId());
                    if (cachedDoc == null) {
                        cachedDoc = recreateSolrDoc(nodeMetaData.getId(), fixedTenantDomain);
                        // This is a work-around for ACE-3228/ACE-3258 and the way stores are expunged when deleting a tenant
                        if (cachedDoc == null) {
                            deleteNode(processor, request, nodeMetaData.getId());
                        }
                    }
                    if (cachedDoc != null) {
                        updatePathRelatedFields(nodeMetaData, cachedDoc);
                        updateNamePathRelatedFields(nodeMetaData, cachedDoc);
                        updateAncestorRelatedFields(nodeMetaData, cachedDoc);
                        AddUpdateCommand addDocCmd = new AddUpdateCommand(request);
                        addDocCmd.overwrite = overwrite;
                        addDocCmd.solrDoc = cachedDoc;
                        processor.processAdd(addDocCmd);
                        solrContentStore.storeDocOnSolrContentStore(fixedTenantDomain, nodeMetaData.getId(), cachedDoc);
                    } else if (log.isDebugEnabled()) {
                        log.debug("... no child doc found to update " + childId);
                    }
                } finally {
                    unlock(nodeId);
                }
            }
        }
    }
}
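The ancestor lookup at the top of the method reduces to a single required TermQuery wrapped in a BooleanQuery via the Lucene 5+ builder. Here is a minimal sketch of just that piece; the class and method names are hypothetical, and the arguments stand in for FIELD_ANCESTOR and the parent NodeRef string.

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;

public class AncestorQueryExample {

    // Sketch: match every document whose ancestor field holds the given node reference.
    public static BooleanQuery ancestorQuery(String ancestorField, String parentNodeRef) {
        return new BooleanQuery.Builder()
                .add(new TermQuery(new Term(ancestorField, parentNodeRef)), BooleanClause.Occur.MUST)
                .build();
    }
}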
Use of org.apache.lucene.search.BooleanQuery in project SearchServices by Alfresco.
The class AlfrescoSolrFingerprintTest, method testBasciFingerPrint.
@Test
public void testBasciFingerPrint() throws Exception {
    /*
     * Create and index an AclChangeSet.
     */
    logger.info("######### Starting fingerprint test ###########");
    AclChangeSet aclChangeSet = getAclChangeSet(1);
    Acl acl = getAcl(aclChangeSet);
    Acl acl2 = getAcl(aclChangeSet);
    AclReaders aclReaders = getAclReaders(aclChangeSet, acl, list("joel"), list("phil"), null);
    AclReaders aclReaders2 = getAclReaders(aclChangeSet, acl2, list("jim"), list("phil"), null);
    indexAclChangeSet(aclChangeSet, list(acl, acl2), list(aclReaders, aclReaders2));
    // Check for the ACL state stamp.
    BooleanQuery.Builder builder = new BooleanQuery.Builder();
    builder.add(new BooleanClause(new TermQuery(new Term(QueryConstants.FIELD_SOLR4_ID, "TRACKER!STATE!ACLTX")),
            BooleanClause.Occur.MUST));
    builder.add(new BooleanClause(LegacyNumericRangeQuery.newLongRange(QueryConstants.FIELD_S_ACLTXID,
            aclChangeSet.getId(), aclChangeSet.getId() + 1, true, false), BooleanClause.Occur.MUST));
    BooleanQuery waitForQuery = builder.build();
    waitForDocCount(waitForQuery, 1, MAX_WAIT_TIME);
    logger.info("#################### Passed First Test ##############################");
    /*
     * Create and index a Transaction.
     */
    // First create a transaction.
    Transaction txn = getTransaction(0, 4);
    // Next create four nodes to update for the transaction.
    Node node1 = getNode(txn, acl, Node.SolrApiNodeStatus.UPDATED);
    Node node2 = getNode(txn, acl, Node.SolrApiNodeStatus.UPDATED);
    Node node3 = getNode(txn, acl, Node.SolrApiNodeStatus.UPDATED);
    Node node4 = getNode(txn, acl, Node.SolrApiNodeStatus.UPDATED);
    // Next create the NodeMetaData for each node. TODO: Add more metadata
    NodeMetaData nodeMetaData1 = getNodeMetaData(node1, txn, acl, "mike", null, false);
    NodeMetaData nodeMetaData2 = getNodeMetaData(node2, txn, acl, "mike", null, false);
    NodeMetaData nodeMetaData3 = getNodeMetaData(node3, txn, acl, "mike", null, false);
    NodeMetaData nodeMetaData4 = getNodeMetaData(node4, txn, acl, "mike", null, false);
    List<String> content = new ArrayList<>();
    int[] sizes = { 2000, 1000, 1500, 750 };
    Random r = new Random(1);
    // r and rand below share the same seed, so token1 is the first token of every document's content
    String token1 = Integer.toString(Math.abs(r.nextInt()));
    for (int i = 0; i < 4; i++) {
        Random rand = new Random(1);
        StringBuilder buf = new StringBuilder();
        int size = sizes[i];
        for (int s = 0; s < size; s++) {
            if (s > 0) {
                buf.append(" ");
            }
            buf.append(Integer.toString(Math.abs(rand.nextInt())));
        }
        content.add(buf.toString());
    }
    // Index the transaction, nodes, and nodeMetaDatas.
    // Note that the content is automatically created by the test framework.
    indexTransaction(txn, list(node1, node2, node3, node4),
            list(nodeMetaData1, nodeMetaData2, nodeMetaData3, nodeMetaData4), content);
    // Check for the TXN state stamp.
    logger.info("#################### Started Second Test ##############################");
    builder = new BooleanQuery.Builder();
    builder.add(new BooleanClause(new TermQuery(new Term(QueryConstants.FIELD_SOLR4_ID, "TRACKER!STATE!TX")),
            BooleanClause.Occur.MUST));
    builder.add(new BooleanClause(LegacyNumericRangeQuery.newLongRange(QueryConstants.FIELD_S_TXID,
            txn.getId(), txn.getId() + 1, true, false), BooleanClause.Occur.MUST));
    waitForQuery = builder.build();
    waitForDocCount(waitForQuery, 1, MAX_WAIT_TIME);
    logger.info("#################### Passed Second Test ##############################");
    /*
     * Query the index for the content.
     */
    waitForDocCount(new TermQuery(new Term(QueryConstants.FIELD_READER, "jim")), 1, MAX_WAIT_TIME);
    waitForDocCount(new TermQuery(new Term("content@s___t@{http://www.alfresco.org/model/content/1.0}content", token1)),
            4, MAX_WAIT_TIME);
    logger.info("#################### Passed Third Test ##############################");
    ModifiableSolrParams params = new ModifiableSolrParams();
    // Query for an id in the content field. The node id is automatically populated
    // into the content field by the test framework.
    params.add("q", "FINGERPRINT:" + node1.getId());
    params.add("qt", "/afts");
    params.add("fl", "DBID,score");
    params.add("start", "0");
    params.add("rows", "6");
    SolrServletRequest req = areq(params, null);
    assertQ(req, "*[count(//doc)=4]",
            "//result/doc[1]/long[@name='DBID'][.='" + node1.getId() + "']",
            "//result/doc[2]/long[@name='DBID'][.='" + node3.getId() + "']",
            "//result/doc[3]/long[@name='DBID'][.='" + node2.getId() + "']",
            "//result/doc[4]/long[@name='DBID'][.='" + node4.getId() + "']");
    params = new ModifiableSolrParams();
    // The _70 suffix requires at least 70% fingerprint overlap: only the two closest documents match.
    params.add("q", "FINGERPRINT:" + node1.getId() + "_70");
    params.add("qt", "/afts");
    params.add("fl", "DBID,score");
    params.add("start", "0");
    params.add("rows", "6");
    req = areq(params, null);
    assertQ(req, "*[count(//doc)= 2]",
            "//result/doc[1]/long[@name='DBID'][.='" + node1.getId() + "']",
            "//result/doc[2]/long[@name='DBID'][.='" + node3.getId() + "']");
    params = new ModifiableSolrParams();
    // At a 45% overlap threshold a third document matches.
    params.add("q", "FINGERPRINT:" + node1.getId() + "_45");
    params.add("qt", "/afts");
    params.add("fl", "DBID,score");
    params.add("start", "0");
    params.add("rows", "6");
    req = areq(params, null);
    assertQ(req, "*[count(//doc)= 3]",
            "//result/doc[1]/long[@name='DBID'][.='" + node1.getId() + "']",
            "//result/doc[2]/long[@name='DBID'][.='" + node3.getId() + "']",
            "//result/doc[3]/long[@name='DBID'][.='" + node2.getId() + "']");
    params = new ModifiableSolrParams();
    // At a 30% overlap threshold all four documents match.
    params.add("q", "FINGERPRINT:" + node1.getId() + "_30");
    params.add("qt", "/afts");
    params.add("fl", "DBID,score");
    params.add("start", "0");
    params.add("rows", "6");
    req = areq(params, null);
    assertQ(req, "*[count(//doc)= 4]",
            "//result/doc[1]/long[@name='DBID'][.='" + node1.getId() + "']",
            "//result/doc[2]/long[@name='DBID'][.='" + node3.getId() + "']",
            "//result/doc[3]/long[@name='DBID'][.='" + node2.getId() + "']",
            "//result/doc[4]/long[@name='DBID'][.='" + node4.getId() + "']");
    params = new ModifiableSolrParams();
    params.add("q", "FINGERPRINT:" + node4.getId());
    params.add("qt", "/afts");
    params.add("fl", "DBID,score");
    params.add("start", "0");
    params.add("rows", "6");
    req = areq(params, null);
    assertQ(req, "*[count(//doc)= 4]",
            "//result/doc[1]/long[@name='DBID'][.='" + node4.getId() + "']",
            "//result/doc[2]/long[@name='DBID'][.='" + node2.getId() + "']",
            "//result/doc[3]/long[@name='DBID'][.='" + node3.getId() + "']",
            "//result/doc[4]/long[@name='DBID'][.='" + node1.getId() + "']");
    // Test nodeRef
    params = new ModifiableSolrParams();
    // Query by node reference instead of DBID; the results should be the same.
    params.add("q", "FINGERPRINT:" + nodeMetaData1.getNodeRef().getId());
    params.add("qt", "/afts");
    params.add("fl", "DBID,score");
    params.add("start", "0");
    params.add("rows", "6");
    req = areq(params, null);
    assertQ(req, "*[count(//doc)=4]",
            "//result/doc[1]/long[@name='DBID'][.='" + node1.getId() + "']",
            "//result/doc[2]/long[@name='DBID'][.='" + node3.getId() + "']",
            "//result/doc[3]/long[@name='DBID'][.='" + node2.getId() + "']",
            "//result/doc[4]/long[@name='DBID'][.='" + node4.getId() + "']");
    params = new ModifiableSolrParams();
    // Node reference with a 70% overlap threshold.
    params.add("q", "FINGERPRINT:" + nodeMetaData1.getNodeRef().getId() + "_70");
    params.add("qt", "/afts");
    params.add("fl", "DBID,score");
    params.add("start", "0");
    params.add("rows", "6");
    req = areq(params, null);
    assertQ(req, "*[count(//doc)= 2]",
            "//result/doc[1]/long[@name='DBID'][.='" + node1.getId() + "']",
            "//result/doc[2]/long[@name='DBID'][.='" + node3.getId() + "']");
    params = new ModifiableSolrParams();
    // Node reference with a 45% overlap threshold.
    params.add("q", "FINGERPRINT:" + nodeMetaData1.getNodeRef().getId() + "_45");
    params.add("qt", "/afts");
    params.add("fl", "DBID,score");
    params.add("start", "0");
    params.add("rows", "6");
    req = areq(params, null);
    assertQ(req, "*[count(//doc)= 3]",
            "//result/doc[1]/long[@name='DBID'][.='" + node1.getId() + "']",
            "//result/doc[2]/long[@name='DBID'][.='" + node3.getId() + "']",
            "//result/doc[3]/long[@name='DBID'][.='" + node2.getId() + "']");
    params = new ModifiableSolrParams();
    // Node reference with a 30% overlap threshold.
    params.add("q", "FINGERPRINT:" + nodeMetaData1.getNodeRef().getId() + "_30");
    params.add("qt", "/afts");
    params.add("fl", "DBID,score");
    params.add("start", "0");
    params.add("rows", "6");
    req = areq(params, null);
    assertQ(req, "*[count(//doc)= 4]",
            "//result/doc[1]/long[@name='DBID'][.='" + node1.getId() + "']",
            "//result/doc[2]/long[@name='DBID'][.='" + node3.getId() + "']",
            "//result/doc[3]/long[@name='DBID'][.='" + node2.getId() + "']",
            "//result/doc[4]/long[@name='DBID'][.='" + node4.getId() + "']");
    params = new ModifiableSolrParams();
    params.add("q", "FINGERPRINT:" + nodeMetaData4.getNodeRef().getId());
    params.add("qt", "/afts");
    params.add("fl", "DBID,score");
    params.add("start", "0");
    params.add("rows", "6");
    req = areq(params, null);
    assertQ(req, "*[count(//doc)= 4]",
            "//result/doc[1]/long[@name='DBID'][.='" + node4.getId() + "']",
            "//result/doc[2]/long[@name='DBID'][.='" + node2.getId() + "']",
            "//result/doc[3]/long[@name='DBID'][.='" + node3.getId() + "']",
            "//result/doc[4]/long[@name='DBID'][.='" + node1.getId() + "']");
}
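The test builds the same "wait for the tracker state document" query twice, once for the ACL changeset and once for the transaction: a marker TermQuery ANDed with a half-open numeric range [id, id + 1) over the stamped id field. Here is a minimal sketch of that pattern; the class and method names are hypothetical, and LegacyNumericRangeQuery assumes an index that still uses Lucene's legacy numeric fields.

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.LegacyNumericRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

public class TrackerStateQueryExample {

    // Sketch: the marker term selects the tracker state document and the
    // half-open range [id, id + 1) pins it to exactly one stamped id.
    public static Query trackerStateQuery(String idField, String marker, String stampField, long id) {
        return new BooleanQuery.Builder()
                .add(new TermQuery(new Term(idField, marker)), BooleanClause.Occur.MUST)
                .add(LegacyNumericRangeQuery.newLongRange(stampField, id, id + 1, true, false),
                        BooleanClause.Occur.MUST)
                .build();
    }
}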
Use of org.apache.lucene.search.BooleanQuery in project SearchServices by Alfresco.
The class Solr4QueryParser, method spanQueryBuilder.
private Query spanQueryBuilder(String field, String first, String last, int slop, boolean inOrder) throws ParseException {
    String propertyFieldName = field.substring(1);
    String expandedFieldName = null;
    PropertyDefinition propertyDef = QueryParserUtils.matchPropertyDefinition(searchParameters.getNamespace(),
            namespacePrefixResolver, dictionaryService, propertyFieldName);
    IndexTokenisationMode tokenisationMode = IndexTokenisationMode.TRUE;
    if (propertyDef != null) {
        tokenisationMode = propertyDef.getIndexTokenisationMode();
        if (tokenisationMode == null) {
            tokenisationMode = IndexTokenisationMode.TRUE;
        }
    } else {
        expandedFieldName = expandAttributeFieldName(field);
    }
    if ((propertyDef != null) && propertyDef.getDataType().getName().equals(DataTypeDefinition.MLTEXT)) {
        // Build a sub-query for each locale and OR the results together; the
        // analysis will take care of cross-language matching for each entry.
        BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
        List<Locale> locales = searchParameters.getLocales();
        List<Locale> expandedLocales = new ArrayList<>();
        for (Locale locale : (((locales == null) || locales.isEmpty()) ? Collections.singletonList(I18NUtil.getLocale()) : locales)) {
            expandedLocales.addAll(MLAnalysisMode.getLocales(mlAnalysisMode, locale, false));
        }
        for (Locale locale : (expandedLocales.isEmpty() ? Collections.singletonList(I18NUtil.getLocale()) : expandedLocales)) {
            addMLTextSpanQuery(field, propertyDef, first, last, slop, inOrder, expandedFieldName, propertyDef,
                    tokenisationMode, booleanQuery, locale);
        }
        return booleanQuery.build();
    } else if ((propertyDef != null) && propertyDef.getDataType().getName().equals(DataTypeDefinition.CONTENT)) {
        // Content
        List<Locale> locales = searchParameters.getLocales();
        List<Locale> expandedLocales = new ArrayList<>();
        for (Locale locale : (((locales == null) || locales.isEmpty()) ? Collections.singletonList(I18NUtil.getLocale()) : locales)) {
            expandedLocales.addAll(MLAnalysisMode.getLocales(mlAnalysisMode, locale, addContentCrossLocaleWildcards()));
        }
        return addContentSpanQuery(field, propertyDef, first, last, slop, inOrder, expandedFieldName, expandedLocales);
    } else if ((propertyDef != null) && propertyDef.getDataType().getName().equals(DataTypeDefinition.TEXT)) {
        BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
        List<Locale> locales = searchParameters.getLocales();
        List<Locale> expandedLocales = new ArrayList<>();
        for (Locale locale : (((locales == null) || locales.isEmpty()) ? Collections.singletonList(I18NUtil.getLocale()) : locales)) {
            expandedLocales.addAll(MLAnalysisMode.getLocales(mlAnalysisMode, locale, false));
        }
        for (Locale locale : (expandedLocales.isEmpty() ? Collections.singletonList(I18NUtil.getLocale()) : expandedLocales)) {
            addTextSpanQuery(field, propertyDef, first, last, slop, inOrder, expandedFieldName, tokenisationMode,
                    booleanQuery, locale);
        }
        return booleanQuery.build();
    } else {
        throw new UnsupportedOperationException(
                "Span queries are only supported for d:text, d:mltext and d:content data types");
    }
}
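Both text branches above share one shape: expand the requested locales through MLAnalysisMode, then let addMLTextSpanQuery or addTextSpanQuery append one clause per locale to a shared builder. Here is a minimal sketch of that shape under the assumption, taken from the source comment, that the per-locale results are OR-ed together; spanForLocale is a hypothetical stand-in for the per-locale clause construction, which the real add* methods perform internally.

import java.util.List;
import java.util.Locale;
import java.util.function.Function;

import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;

public class PerLocaleQueryExample {

    // Sketch: one sub-query per locale, combined with SHOULD so that a match
    // in any locale satisfies the query; the analyzers handle cross-language matching.
    public static Query perLocaleQuery(List<Locale> expandedLocales,
                                       Function<Locale, Query> spanForLocale) {
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        for (Locale locale : expandedLocales) {
            builder.add(spanForLocale.apply(locale), BooleanClause.Occur.SHOULD);
        }
        return builder.build();
    }
}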