Use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by Apache.
From the class SolrPluginUtils, method optimizePreFetchDocs.
/**
 * Pre-fetch documents into the index searcher's document cache.
 *
 * This is an entirely optional step which you might want to perform for
 * the following reasons:
 *
 * <ul>
 * <li>Locates the document-retrieval costs in one spot, which helps
 * detailed performance measurement.</li>
 *
 * <li>Determines a priori which fields will need to be fetched by
 * various subtasks, like response writing and highlighting. This
 * minimizes the chance that many needed fields will be loaded lazily.
 * (It is more efficient to load all the fields we require up front.)</li>
 * </ul>
 *
 * If lazy field loading is disabled, this method does nothing.
 */
public static void optimizePreFetchDocs(ResponseBuilder rb, DocList docs, Query query, SolrQueryRequest req, SolrQueryResponse res) throws IOException {
  SolrIndexSearcher searcher = req.getSearcher();
  if (!searcher.getDocFetcher().isLazyFieldLoadingEnabled()) {
    // nothing to do
    return;
  }
  ReturnFields returnFields = res.getReturnFields();
  if (returnFields.getLuceneFieldNames() != null) {
    Set<String> fieldFilter = returnFields.getLuceneFieldNames();
    if (rb.doHighlights) {
      // copy return fields list
      fieldFilter = new HashSet<>(fieldFilter);
      // add highlight fields
      SolrHighlighter highlighter = HighlightComponent.getHighlighter(req.getCore());
      for (String field : highlighter.getHighlightFields(query, req, null)) {
        fieldFilter.add(field);
      }
      // fetch unique key if one exists.
      SchemaField keyField = searcher.getSchema().getUniqueKeyField();
      if (null != keyField) {
        fieldFilter.add(keyField.getName());
      }
    }
    // get documents
    DocIterator iter = docs.iterator();
    for (int i = 0; i < docs.size(); i++) {
      searcher.doc(iter.nextDoc(), fieldFilter);
    }
  }
}
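A minimal sketch of how a custom SearchComponent might invoke this helper to warm the document cache before later stages fetch stored fields. The component class itself is hypothetical and only illustrates the call; the SolrPluginUtils method is the one shown above.

import java.io.IOException;

import org.apache.solr.handler.component.ResponseBuilder;
import org.apache.solr.handler.component.SearchComponent;
import org.apache.solr.search.DocList;
import org.apache.solr.util.SolrPluginUtils;

// Hypothetical component, for illustration only.
public class PrefetchDocsComponent extends SearchComponent {

  @Override
  public void prepare(ResponseBuilder rb) throws IOException {
    // nothing to prepare in this sketch
  }

  @Override
  public void process(ResponseBuilder rb) throws IOException {
    DocList docs = rb.getResults().docList;
    // Load all needed stored fields once, so later stages (highlighting,
    // response writing) hit the document cache instead of lazy-loading fields.
    SolrPluginUtils.optimizePreFetchDocs(rb, docs, rb.getQuery(), rb.req, rb.rsp);
  }

  @Override
  public String getDescription() {
    return "pre-fetch documents into the document cache";
  }
}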
Use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by Apache.
From the class TestInjection, method waitForInSyncWithLeader.
@SuppressForbidden(reason = "Need currentTimeMillis, because COMMIT_TIME_MSEC_KEY uses currentTimeMillis as its value")
public static boolean waitForInSyncWithLeader(SolrCore core, ZkController zkController, String collection, String shardId) throws InterruptedException {
  if (waitForReplicasInSync == null) return true;
  log.info("Start waiting for replica to be in sync with leader");
  long currentTime = System.currentTimeMillis();
  Pair<Boolean, Integer> pair = parseValue(waitForReplicasInSync);
  boolean enabled = pair.first();
  if (!enabled) return true;
  long t = System.currentTimeMillis() - 200;
  try {
    for (int i = 0; i < pair.second(); i++) {
      if (core.isClosed()) return true;
      Replica leaderReplica = zkController.getZkStateReader().getLeaderRetry(collection, shardId);
      try (HttpSolrClient leaderClient = new HttpSolrClient.Builder(leaderReplica.getCoreUrl()).build()) {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set(CommonParams.QT, ReplicationHandler.PATH);
        params.set(COMMAND, CMD_DETAILS);
        NamedList<Object> response = leaderClient.request(new QueryRequest(params));
        long leaderVersion = (long) ((NamedList) response.get("details")).get("indexVersion");
        RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
        try {
          String localVersion = searcher.get().getIndexReader().getIndexCommit().getUserData().get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
          if (localVersion == null && leaderVersion == 0 && !core.getUpdateHandler().getUpdateLog().hasUncommittedChanges()) return true;
          if (localVersion != null && Long.parseLong(localVersion) == leaderVersion && (leaderVersion >= t || i >= 6)) {
            log.info("Waiting time for tlog replica to be in sync with leader: {}", System.currentTimeMillis() - currentTime);
            return true;
          } else {
            log.debug("Tlog replica not in sync with leader yet. Attempt: {}. Local Version={}, leader Version={}", i, localVersion, leaderVersion);
            Thread.sleep(500);
          }
        } finally {
          searcher.decref();
        }
      }
    }
  } catch (Exception e) {
    // include the exception so the failure cause is not silently dropped
    log.error("Exception while waiting for replicas to be in sync with the leader", e);
  }
  return false;
}
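A hedged sketch of how a SolrCloud test might arm this fault-injection point before indexing against TLOG replicas. The "true:60" value is an assumption that follows the enabled:count format consumed by parseValue(...) above, and the test body itself is elided.

// Arm the injection point: enabled, up to 60 polling attempts (assumed format).
TestInjection.waitForReplicasInSync = "true:60";
try {
  // ... index documents into a collection with TLOG replicas and commit;
  // the replication path then calls waitForInSyncWithLeader(core, zkController,
  // collection, shardId), which blocks until the replica's local commit
  // timestamp matches the leader's reported indexVersion or attempts run out ...
} finally {
  // Clear all injection points so later tests are unaffected.
  TestInjection.reset();
}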
Use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by Apache.
From the class LTRRescorer, method rescore.
/**
 * Rescores the documents.
 *
 * @param searcher
 *          the current IndexSearcher
 * @param firstPassTopDocs
 *          the documents to rerank
 * @param topN
 *          the number of documents to return
 */
@Override
public TopDocs rescore(IndexSearcher searcher, TopDocs firstPassTopDocs, int topN) throws IOException {
  if ((topN == 0) || (firstPassTopDocs.totalHits == 0)) {
    return firstPassTopDocs;
  }
  final ScoreDoc[] hits = firstPassTopDocs.scoreDocs;
  Arrays.sort(hits, new Comparator<ScoreDoc>() {
    @Override
    public int compare(ScoreDoc a, ScoreDoc b) {
      return a.doc - b.doc;
    }
  });
  topN = Math.min(topN, firstPassTopDocs.totalHits);
  final ScoreDoc[] reranked = new ScoreDoc[topN];
  final List<LeafReaderContext> leaves = searcher.getIndexReader().leaves();
  final LTRScoringQuery.ModelWeight modelWeight = (LTRScoringQuery.ModelWeight) searcher.createNormalizedWeight(scoringQuery, true);
  final SolrIndexSearcher solrIndexSearch = (SolrIndexSearcher) searcher;
  scoreFeatures(solrIndexSearch, firstPassTopDocs, topN, modelWeight, hits, leaves, reranked);
  // Must sort all documents that we reranked, and then select the top
  Arrays.sort(reranked, new Comparator<ScoreDoc>() {
    @Override
    public int compare(ScoreDoc a, ScoreDoc b) {
      // Sort by score descending, then docID ascending:
      if (a.score > b.score) {
        return -1;
      } else if (a.score < b.score) {
        return 1;
      } else {
        // because docIDs are >= 0:
        return a.doc - b.doc;
      }
    }
  });
  return new TopDocs(firstPassTopDocs.totalHits, reranked, reranked[0].score);
}
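A minimal usage sketch, assuming the searcher is Solr's own SolrIndexSearcher (the cast above requires it) and that an LTRScoringQuery has already been built for the deployed feature store and model; the candidate-window size and final cut-off are illustrative.

import java.io.IOException;

import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
import org.apache.solr.ltr.LTRRescorer;
import org.apache.solr.ltr.LTRScoringQuery;
import org.apache.solr.search.SolrIndexSearcher;

// userQuery and scoringQuery are placeholders supplied by the caller; building
// an LTRScoringQuery depends on the configured model and is out of scope here.
static TopDocs rerankTop10(SolrIndexSearcher searcher, Query userQuery, LTRScoringQuery scoringQuery) throws IOException {
  // First pass: cheap retrieval of a 100-document candidate window.
  TopDocs firstPass = searcher.search(userQuery, 100);
  // Second pass: rescore the candidates with the LTR model and keep the top 10.
  LTRRescorer rescorer = new LTRRescorer(scoringQuery);
  return rescorer.rescore(searcher, firstPass, 10);
}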
Use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by Apache.
From the class LukeRequestHandler, method getIndexedFieldsInfo.
private static SimpleOrderedMap<Object> getIndexedFieldsInfo(SolrQueryRequest req) throws Exception {
  SolrIndexSearcher searcher = req.getSearcher();
  SolrParams params = req.getParams();
  Set<String> fields = null;
  String fl = params.get(CommonParams.FL);
  if (fl != null) {
    fields = new TreeSet<>(Arrays.asList(fl.split("[,\\s]+")));
  }
  LeafReader reader = searcher.getSlowAtomicReader();
  IndexSchema schema = searcher.getSchema();
  // Don't be tempted to put this in the loop below; the whole point here is to alphabetize the fields!
  Set<String> fieldNames = new TreeSet<>();
  for (FieldInfo fieldInfo : reader.getFieldInfos()) {
    fieldNames.add(fieldInfo.name);
  }
  // Walk the term enum and keep a priority queue for each map in our set
  SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<>();
  for (String fieldName : fieldNames) {
    if (fields != null && !fields.contains(fieldName) && !fields.contains("*")) {
      // we're not interested in this field. Still an issue here
      continue;
    }
    SimpleOrderedMap<Object> fieldMap = new SimpleOrderedMap<>();
    SchemaField sfield = schema.getFieldOrNull(fieldName);
    FieldType ftype = (sfield == null) ? null : sfield.getType();
    fieldMap.add("type", (ftype == null) ? null : ftype.getTypeName());
    fieldMap.add("schema", getFieldFlags(sfield));
    if (sfield != null && schema.isDynamicField(sfield.getName()) && schema.getDynamicPattern(sfield.getName()) != null) {
      fieldMap.add("dynamicBase", schema.getDynamicPattern(sfield.getName()));
    }
    Terms terms = reader.fields().terms(fieldName);
    if (terms == null) {
      // Not indexed, so we need to report what we can (it made it through the fl param if specified)
      finfo.add(fieldName, fieldMap);
      continue;
    }
    if (sfield != null && sfield.indexed()) {
      if (params.getBool(INCLUDE_INDEX_FIELD_FLAGS, true)) {
        Document doc = getFirstLiveDoc(terms, reader);
        if (doc != null) {
          // Found a document with this field
          try {
            IndexableField fld = doc.getField(fieldName);
            if (fld != null) {
              fieldMap.add("index", getFieldFlags(fld));
            } else {
              // it is a non-stored field...
              fieldMap.add("index", "(unstored field)");
            }
          } catch (Exception ex) {
            log.warn("error reading field: " + fieldName);
          }
        }
      }
      fieldMap.add("docs", terms.getDocCount());
    }
    if (fields != null && (fields.contains(fieldName) || fields.contains("*"))) {
      getDetailedFieldInfo(req, fieldName, fieldMap);
    }
    // Add the field
    finfo.add(fieldName, fieldMap);
  }
  return finfo;
}
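For comparison, a SolrJ sketch of the kind of request that reaches this code path; it assumes the handler is registered at the default /admin/luke path, and the field names in fl are illustrative.

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;

static NamedList<Object> fetchIndexedFieldInfo(SolrClient client) throws Exception {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set(CommonParams.FL, "title,body"); // limit the report, mirroring the fl handling above
  QueryRequest luke = new QueryRequest(params);
  luke.setPath("/admin/luke"); // assumed default registration of LukeRequestHandler
  // The per-field maps built by getIndexedFieldsInfo(...) are returned under "fields".
  return client.request(luke);
}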
Use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by Apache.
From the class TermVectorReusingLeafReader, method doHighlighting.
/**
 * Generates a list of highlighted query fragments for each item in a list
 * of documents, or returns null if highlighting is disabled.
 *
 * @param docs query results
 * @param query the query
 * @param req the current request
 * @param defaultFields default list of fields to summarize
 *
 * @return NamedList containing a NamedList for each document, which in
 *         turn contains (field, summary) pairs.
 */
@Override
@SuppressWarnings("unchecked")
public NamedList<Object> doHighlighting(DocList docs, Query query, SolrQueryRequest req, String[] defaultFields) throws IOException {
  SolrParams params = req.getParams();
  if (!isHighlightingEnabled(params)) { // also returns early if no unique key field
    return null;
  }
  boolean rewrite = query != null && !(Boolean.valueOf(params.get(HighlightParams.USE_PHRASE_HIGHLIGHTER, "true")) && Boolean.valueOf(params.get(HighlightParams.HIGHLIGHT_MULTI_TERM, "true")));
  if (rewrite) {
    query = query.rewrite(req.getSearcher().getIndexReader());
  }
  SolrIndexSearcher searcher = req.getSearcher();
  IndexSchema schema = searcher.getSchema();
  // fetch unique key if one exists.
  SchemaField keyField = schema.getUniqueKeyField();
  if (keyField == null) {
    // exit early; we need a unique key field to populate the response
    return null;
  }
  String[] fieldNames = getHighlightFields(query, req, defaultFields);
  Set<String> preFetchFieldNames = getDocPrefetchFieldNames(fieldNames, req);
  if (preFetchFieldNames != null) {
    preFetchFieldNames.add(keyField.getName());
  }
  // Lazy container for fvh and fieldQuery
  FvhContainer fvhContainer = new FvhContainer(null, null);
  // SOLR-5855
  IndexReader reader = new TermVectorReusingLeafReader(req.getSearcher().getSlowAtomicReader());
  // Highlight each document
  NamedList fragments = new SimpleOrderedMap();
  DocIterator iterator = docs.iterator();
  for (int i = 0; i < docs.size(); i++) {
    int docId = iterator.nextDoc();
    Document doc = searcher.doc(docId, preFetchFieldNames);
    @SuppressWarnings("rawtypes")
    NamedList docHighlights = new SimpleOrderedMap();
    // Highlight per-field
    for (String fieldName : fieldNames) {
      SchemaField schemaField = schema.getFieldOrNull(fieldName);
      // object type allows flexibility for subclassers
      Object fieldHighlights;
      fieldHighlights = doHighlightingOfField(doc, docId, schemaField, fvhContainer, query, reader, req, params);
      if (fieldHighlights == null) {
        fieldHighlights = alternateField(doc, docId, fieldName, fvhContainer, query, reader, req);
      }
      if (fieldHighlights != null) {
        docHighlights.add(fieldName, fieldHighlights);
      }
    } // for each field
    fragments.add(schema.printableUniqueKey(doc), docHighlights);
  } // for each doc
  return fragments;
}
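A hedged SolrJ sketch of a request that exercises this method, and of reading the resulting per-document highlights; the query string and field names are illustrative.

import java.util.List;
import java.util.Map;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.params.HighlightParams;

static void printHighlights(SolrClient client) throws Exception {
  SolrQuery q = new SolrQuery("content:solr"); // illustrative query
  q.setHighlight(true);                        // turns on highlighting for the request
  q.set(HighlightParams.FIELDS, "content,title");
  QueryResponse rsp = client.query(q);
  // uniqueKey -> (field -> snippets), mirroring the NamedList built above.
  Map<String, Map<String, List<String>>> highlighting = rsp.getHighlighting();
  highlighting.forEach((id, perField) -> System.out.println(id + " -> " + perField));
}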