Use of org.elasticsearch.search.internal.SearchContext in project elasticsearch by elastic.
Class UnifiedHighlighter, method highlight.
@Override
public HighlightField highlight(HighlighterContext highlighterContext) {
    FieldMapper fieldMapper = highlighterContext.mapper;
    SearchContextHighlight.Field field = highlighterContext.field;
    SearchContext context = highlighterContext.context;
    FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
    if (!hitContext.cache().containsKey(CACHE_KEY)) {
        hitContext.cache().put(CACHE_KEY, new HighlighterEntry());
    }
    HighlighterEntry highlighterEntry = (HighlighterEntry) hitContext.cache().get(CACHE_KEY);
    MapperHighlighterEntry mapperHighlighterEntry = highlighterEntry.mappers.get(fieldMapper);
    if (mapperHighlighterEntry == null) {
        Encoder encoder = field.fieldOptions().encoder().equals("html")
            ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
        CustomPassageFormatter passageFormatter = new CustomPassageFormatter(
            field.fieldOptions().preTags()[0], field.fieldOptions().postTags()[0], encoder);
        mapperHighlighterEntry = new MapperHighlighterEntry(passageFormatter);
    }
    List<Snippet> snippets = new ArrayList<>();
    int numberOfFragments;
    try {
        Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().getType())
            .mappers().indexAnalyzer();
        List<Object> fieldValues = HighlightUtils.loadFieldValues(field, fieldMapper, context, hitContext);
        fieldValues = fieldValues.stream().map(obj -> {
            if (obj instanceof BytesRef) {
                return fieldMapper.fieldType().valueForDisplay(obj).toString();
            } else {
                return obj;
            }
        }).collect(Collectors.toList());
        IndexSearcher searcher = new IndexSearcher(hitContext.reader());
        CustomUnifiedHighlighter highlighter;
        if (field.fieldOptions().numberOfFragments() == 0) {
            // we use a control char to separate values, which is the only char that the custom break iterator
            // breaks the text on, so we don't lose the distinction between the different values of a field and we
            // get back a snippet per value
            String fieldValue = mergeFieldValues(fieldValues, MULTIVAL_SEP_CHAR);
            org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator breakIterator =
                new org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR);
            highlighter = new CustomUnifiedHighlighter(searcher, analyzer, mapperHighlighterEntry.passageFormatter,
                field.fieldOptions().boundaryScannerLocale(), breakIterator, fieldValue,
                field.fieldOptions().noMatchSize());
            // we are highlighting the whole content, one snippet per value
            numberOfFragments = fieldValues.size();
        } else {
            // using the paragraph separator we make sure that each field value holds a discrete passage for highlighting
            String fieldValue = mergeFieldValues(fieldValues, MULTIVAL_SEP_CHAR);
            BreakIterator bi = getBreakIterator(field);
            highlighter = new CustomUnifiedHighlighter(searcher, analyzer, mapperHighlighterEntry.passageFormatter,
                field.fieldOptions().boundaryScannerLocale(), bi, fieldValue,
                field.fieldOptions().noMatchSize());
            numberOfFragments = field.fieldOptions().numberOfFragments();
        }
        if (field.fieldOptions().requireFieldMatch()) {
            final String fieldName = highlighterContext.fieldName;
            highlighter.setFieldMatcher((name) -> fieldName.equals(name));
        } else {
            highlighter.setFieldMatcher((name) -> true);
        }
        Snippet[] fieldSnippets = highlighter.highlightField(highlighterContext.fieldName,
            highlighterContext.query, hitContext.docId(), numberOfFragments);
        for (Snippet fieldSnippet : fieldSnippets) {
            if (Strings.hasText(fieldSnippet.getText())) {
                snippets.add(fieldSnippet);
            }
        }
    } catch (IOException e) {
        throw new FetchPhaseExecutionException(context,
            "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
    }
    snippets = filterSnippets(snippets, field.fieldOptions().numberOfFragments());
    if (field.fieldOptions().scoreOrdered()) {
        // let's sort the snippets by score if needed
        CollectionUtil.introSort(snippets, (o1, o2) -> Double.compare(o2.getScore(), o1.getScore()));
    }
    String[] fragments = new String[snippets.size()];
    for (int i = 0; i < fragments.length; i++) {
        fragments[i] = snippets.get(i).getText();
    }
    if (fragments.length > 0) {
        return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
    }
    return null;
}
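The comments in the example explain that multiple values of a field are joined with a single control character so the break iterator can split them back into one snippet per value. A minimal, self-contained sketch of that joining step follows; the class name, the separator value, and the mergeFieldValues signature here are assumptions for illustration, not the Elasticsearch implementation.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class MergeFieldValuesSketch {
    // assumed separator: a control char that normal text never contains
    private static final char MULTIVAL_SEP_CHAR = '\u0000';

    // join every value's string form with the separator char
    static String mergeFieldValues(List<Object> values, char sep) {
        return values.stream()
                .map(Object::toString)
                .collect(Collectors.joining(String.valueOf(sep)));
    }

    public static void main(String[] args) {
        List<Object> values = Arrays.asList("first value", "second value");
        String merged = mergeFieldValues(values, MULTIVAL_SEP_CHAR);
        // with numberOfFragments == 0 the example requests one fragment per value
        System.out.println("fragments requested: " + values.size());
        System.out.println(merged.replace(MULTIVAL_SEP_CHAR, '|'));
    }
}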
Use of org.elasticsearch.search.internal.SearchContext in project elasticsearch by elastic.
Class TransportExplainAction, method shardOperation.
@Override
protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) throws IOException {
    ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(shardId,
        new String[] { request.type() }, request.nowInMillis, request.filteringAlias());
    SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
    Term uidTerm = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
    Engine.GetResult result = null;
    try {
        result = context.indexShard().get(new Engine.Get(false, uidTerm));
        if (!result.exists()) {
            return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
        }
        context.parsedQuery(context.getQueryShardContext().toQuery(request.query()));
        context.preProcess(true);
        int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().context.docBase;
        Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
        for (RescoreSearchContext ctx : context.rescore()) {
            Rescorer rescorer = ctx.rescorer();
            explanation = rescorer.explain(topLevelDocId, context, ctx, explanation);
        }
        if (request.storedFields() != null ||
            (request.fetchSourceContext() != null && request.fetchSourceContext().fetchSource())) {
            // Advantage is that we're not opening a second searcher to retrieve the _source. Also
            // because we are working in the same searcher in engineGetResult we can be sure that a
            // doc isn't deleted between the initial get and this call.
            GetResult getResult = context.indexShard().getService().get(result, request.id(), request.type(),
                request.storedFields(), request.fetchSourceContext());
            return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation, getResult);
        } else {
            return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation);
        }
    } catch (IOException e) {
        throw new ElasticsearchException("Could not explain", e);
    } finally {
        Releasables.close(result, context);
    }
}
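The line computing topLevelDocId converts a segment-local Lucene document id into a top-level one by adding the segment's docBase, which is what IndexSearcher#explain expects. A tiny sketch of that arithmetic is below; the class and method names are hypothetical, the values are made up for illustration.

public class TopLevelDocIdSketch {
    // a top-level doc id is the segment's docBase plus the segment-local doc id
    static int topLevelDocId(int segmentDocId, int segmentDocBase) {
        return segmentDocBase + segmentDocId;
    }

    public static void main(String[] args) {
        // e.g. the hit is doc 7 inside a segment whose docBase is 100
        System.out.println(topLevelDocId(7, 100)); // prints 107
    }
}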
Use of org.elasticsearch.search.internal.SearchContext in project elasticsearch by elastic.
Class SearchService, method executeDfsPhase.
public DfsSearchResult executeDfsPhase(ShardSearchRequest request, SearchTask task) throws IOException {
    final SearchContext context = createAndPutContext(request);
    context.incRef();
    try {
        context.setTask(task);
        contextProcessing(context);
        dfsPhase.execute(context);
        contextProcessedSuccessfully(context);
        return context.dfsResult();
    } catch (Exception e) {
        logger.trace("Dfs phase failed", e);
        processFailure(context, e);
        throw ExceptionsHelper.convertToRuntime(e);
    } finally {
        cleanContext(context);
    }
}
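The pattern here is reference counting around the phase: incRef() before the work, cleanup in finally, so the SearchContext cannot be released while the phase is still running. A minimal sketch of that idea follows; the Context class and its refCount behaviour are invented for illustration and are not the SearchService implementation.

public class RefCountSketch {
    static class Context {
        private int refCount = 1;
        void incRef() { refCount++; }
        void decRef() {
            if (--refCount == 0) {
                System.out.println("context released");
            }
        }
    }

    static void runPhase(Context context) {
        context.incRef();
        try {
            // ... execute the phase against the context ...
            System.out.println("phase executed");
        } finally {
            context.decRef(); // plays the role of cleanContext(context) in the example
        }
    }

    public static void main(String[] args) {
        Context ctx = new Context();
        runPhase(ctx);
        ctx.decRef(); // final release once the caller is done with the context
    }
}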
Use of org.elasticsearch.search.internal.SearchContext in project elasticsearch by elastic.
Class SearchService, method executeFetchPhase.
public FetchSearchResult executeFetchPhase(ShardFetchRequest request, SearchTask task) {
    final SearchContext context = findContext(request.id());
    final SearchOperationListener operationListener = context.indexShard().getSearchOperationListener();
    context.incRef();
    try {
        context.setTask(task);
        contextProcessing(context);
        if (request.lastEmittedDoc() != null) {
            context.scrollContext().lastEmittedDoc = request.lastEmittedDoc();
        }
        context.docIdsToLoad(request.docIds(), 0, request.docIdsSize());
        operationListener.onPreFetchPhase(context);
        long time = System.nanoTime();
        fetchPhase.execute(context);
        if (fetchPhaseShouldFreeContext(context)) {
            freeContext(request.id());
        } else {
            contextProcessedSuccessfully(context);
        }
        operationListener.onFetchPhase(context, System.nanoTime() - time);
        return context.fetchResult();
    } catch (Exception e) {
        operationListener.onFailedFetchPhase(context);
        logger.trace("Fetch phase failed", e);
        processFailure(context, e);
        throw ExceptionsHelper.convertToRuntime(e);
    } finally {
        cleanContext(context);
    }
}
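Besides the reference counting, this example brackets the phase with pre/post listener callbacks and reports the elapsed time measured with System.nanoTime(). A small sketch of that bracketing follows; the PhaseListener interface and all names are hypothetical stand-ins, not the SearchOperationListener API.

public class PhaseTimingSketch {
    interface PhaseListener {
        void onPrePhase();
        void onPhase(long tookNanos);
        void onFailedPhase();
    }

    static void executePhase(Runnable phase, PhaseListener listener) {
        listener.onPrePhase();
        long start = System.nanoTime();
        try {
            phase.run();
            // only successful executions report their duration, as in the example above
            listener.onPhase(System.nanoTime() - start);
        } catch (RuntimeException e) {
            listener.onFailedPhase();
            throw e;
        }
    }

    public static void main(String[] args) {
        executePhase(() -> { /* fetch work would happen here */ }, new PhaseListener() {
            public void onPrePhase() { System.out.println("pre fetch"); }
            public void onPhase(long tookNanos) { System.out.println("fetch took " + tookNanos + " ns"); }
            public void onFailedPhase() { System.out.println("fetch failed"); }
        });
    }
}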
Use of org.elasticsearch.search.internal.SearchContext in project elasticsearch by elastic.
Class SearchService, method executeQueryPhase.
public ScrollQuerySearchResult executeQueryPhase(InternalScrollSearchRequest request, SearchTask task) {
    final SearchContext context = findContext(request.id());
    SearchOperationListener operationListener = context.indexShard().getSearchOperationListener();
    context.incRef();
    try {
        context.setTask(task);
        operationListener.onPreQueryPhase(context);
        long time = System.nanoTime();
        contextProcessing(context);
        processScroll(request, context);
        queryPhase.execute(context);
        contextProcessedSuccessfully(context);
        operationListener.onQueryPhase(context, System.nanoTime() - time);
        return new ScrollQuerySearchResult(context.queryResult(), context.shardTarget());
    } catch (Exception e) {
        operationListener.onFailedQueryPhase(context);
        logger.trace("Query phase failed", e);
        processFailure(context, e);
        throw ExceptionsHelper.convertToRuntime(e);
    } finally {
        cleanContext(context);
    }
}