Use of org.elasticsearch.tasks.TaskCancelledException in project elasticsearch by elastic.
Class FetchPhase, method execute:
@Override
public void execute(SearchContext context) {
    final FieldsVisitor fieldsVisitor;
    Set<String> fieldNames = null;
    List<String> fieldNamePatterns = null;
    StoredFieldsContext storedFieldsContext = context.storedFieldsContext();
    if (storedFieldsContext == null) {
        // no fields specified, default to returning _source if there is no explicit indication
        if (!context.hasScriptFields() && !context.hasFetchSourceContext()) {
            context.fetchSourceContext(new FetchSourceContext(true));
        }
        fieldsVisitor = new FieldsVisitor(context.sourceRequested());
    } else if (storedFieldsContext.fetchFields() == false) {
        // disable stored fields entirely
        fieldsVisitor = null;
    } else {
        for (String fieldName : context.storedFieldsContext().fieldNames()) {
            if (fieldName.equals(SourceFieldMapper.NAME)) {
                FetchSourceContext fetchSourceContext = context.hasFetchSourceContext()
                        ? context.fetchSourceContext()
                        : FetchSourceContext.FETCH_SOURCE;
                context.fetchSourceContext(new FetchSourceContext(true,
                        fetchSourceContext.includes(), fetchSourceContext.excludes()));
                continue;
            }
            if (Regex.isSimpleMatchPattern(fieldName)) {
                if (fieldNamePatterns == null) {
                    fieldNamePatterns = new ArrayList<>();
                }
                fieldNamePatterns.add(fieldName);
            } else {
                MappedFieldType fieldType = context.smartNameFieldType(fieldName);
                if (fieldType == null) {
                    // only fail if we know it is an object field; missing paths / fields shouldn't fail
                    if (context.getObjectMapper(fieldName) != null) {
                        throw new IllegalArgumentException("field [" + fieldName + "] isn't a leaf field");
                    }
                }
                if (fieldNames == null) {
                    fieldNames = new HashSet<>();
                }
                fieldNames.add(fieldName);
            }
        }
        boolean loadSource = context.sourceRequested();
        if (fieldNames == null && fieldNamePatterns == null) {
            // empty list specified, default to disabling _source if there is no explicit indication
            fieldsVisitor = new FieldsVisitor(loadSource);
        } else {
            fieldsVisitor = new CustomFieldsVisitor(
                    fieldNames == null ? Collections.emptySet() : fieldNames,
                    fieldNamePatterns == null ? Collections.emptyList() : fieldNamePatterns,
                    loadSource);
        }
    }
    SearchHit[] hits = new SearchHit[context.docIdsToLoadSize()];
    FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
    for (int index = 0; index < context.docIdsToLoadSize(); index++) {
        if (context.isCancelled()) {
            throw new TaskCancelledException("cancelled");
        }
        int docId = context.docIdsToLoad()[context.docIdsToLoadFrom() + index];
        int readerIndex = ReaderUtil.subIndex(docId, context.searcher().getIndexReader().leaves());
        LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex);
        int subDocId = docId - subReaderContext.docBase;
        final SearchHit searchHit;
        try {
            int rootDocId = findRootDocumentIfNested(context, subReaderContext, subDocId);
            if (rootDocId != -1) {
                searchHit = createNestedSearchHit(context, docId, subDocId, rootDocId,
                        fieldNames, fieldNamePatterns, subReaderContext);
            } else {
                searchHit = createSearchHit(context, fieldsVisitor, docId, subDocId, subReaderContext);
            }
        } catch (IOException e) {
            throw ExceptionsHelper.convertToElastic(e);
        }
        hits[index] = searchHit;
        hitContext.reset(searchHit, subReaderContext, subDocId, context.searcher());
        for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
            fetchSubPhase.hitExecute(context, hitContext);
        }
    }
    for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
        fetchSubPhase.hitsExecute(context, hits);
    }
    context.fetchResult().hits(new SearchHits(hits, context.queryResult().topDocs().totalHits,
            context.queryResult().topDocs().getMaxScore()));
}
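
The per-hit isCancelled() check at the top of the fetch loop is the pattern this example illustrates: the phase polls a shared cancellation flag once per unit of work and aborts with TaskCancelledException, so a cancelled search task stops consuming resources at the next checkpoint instead of running to completion. A minimal, self-contained sketch of the same cooperative-cancellation shape, assuming only the JDK (CancelledException and FetchLoopSketch are illustrative stand-ins, not Elasticsearch API):

import java.util.List;
import java.util.function.BooleanSupplier;

// Illustrative stand-in for org.elasticsearch.tasks.TaskCancelledException.
class CancelledException extends RuntimeException {
    CancelledException(String message) {
        super(message);
    }
}

class FetchLoopSketch {
    // Mirrors the shape of FetchPhase.execute: poll the cancellation flag once
    // per work unit, so a cancelled task aborts between documents rather than
    // only when the whole loop finishes.
    static <T> void processAll(List<T> items, BooleanSupplier isCancelled) {
        for (T item : items) {
            if (isCancelled.getAsBoolean()) {
                throw new CancelledException("cancelled");
            }
            process(item);
        }
    }

    private static <T> void process(T item) {
        // stand-in for the real per-document work (loading stored fields, running fetch sub-phases)
    }
}

Checking once per document keeps the cost of the flag read negligible while bounding how much work can still happen after a cancel request.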
Use of org.elasticsearch.tasks.TaskCancelledException in project elasticsearch by elastic.
Class DfsPhase, method execute:
@Override
public void execute(SearchContext context) {
    final ObjectHashSet<Term> termsSet = new ObjectHashSet<>();
    try {
        context.searcher().createNormalizedWeight(context.query(), true).extractTerms(new DelegateSet(termsSet));
        for (RescoreSearchContext rescoreContext : context.rescore()) {
            rescoreContext.rescorer().extractTerms(context, rescoreContext, new DelegateSet(termsSet));
        }
        Term[] terms = termsSet.toArray(Term.class);
        TermStatistics[] termStatistics = new TermStatistics[terms.length];
        IndexReaderContext indexReaderContext = context.searcher().getTopReaderContext();
        for (int i = 0; i < terms.length; i++) {
            if (context.isCancelled()) {
                throw new TaskCancelledException("cancelled");
            }
            // LUCENE 4 UPGRADE: cache TermContext?
            TermContext termContext = TermContext.build(indexReaderContext, terms[i]);
            termStatistics[i] = context.searcher().termStatistics(terms[i], termContext);
        }
        ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
        for (Term term : terms) {
            assert term.field() != null : "field is null";
            if (!fieldStatistics.containsKey(term.field())) {
                final CollectionStatistics collectionStatistics = context.searcher().collectionStatistics(term.field());
                fieldStatistics.put(term.field(), collectionStatistics);
                if (context.isCancelled()) {
                    throw new TaskCancelledException("cancelled");
                }
            }
        }
        context.dfsResult().termsStatistics(terms, termStatistics)
                .fieldStatistics(fieldStatistics)
                .maxDoc(context.searcher().getIndexReader().maxDoc());
    } catch (Exception e) {
        throw new DfsPhaseExecutionException(context, "Exception during dfs phase", e);
    } finally {
        // don't hold on to terms
        termsSet.clear();
    }
}
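
Note that both isCancelled() checks in DfsPhase sit inside the try block, so the TaskCancelledException is caught by catch (Exception e) and rethrown wrapped in a DfsPhaseExecutionException, surviving only as the cause. A caller that wants to tell cancellation apart from a genuine DFS failure can walk the cause chain; the helper below is a hypothetical sketch, not an Elasticsearch utility:

// Hypothetical helper: true if any throwable in the cause chain is an instance
// of the given type, e.g. isCausedBy(e, TaskCancelledException.class).
final class Causes {
    static boolean isCausedBy(Throwable root, Class<? extends Throwable> type) {
        for (Throwable current = root; current != null; current = current.getCause()) {
            if (type.isInstance(current)) {
                return true;
            }
        }
        return false;
    }
}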