Use of org.elasticsearch.ElasticsearchException in project elasticsearch by elastic.
From the class RestCountAction, method doCatRequest:
@Override
public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
    String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
    SearchRequest countRequest = new SearchRequest(indices);
    SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(0);
    countRequest.source(searchSourceBuilder);
    try {
        request.withContentOrSourceParamParserOrNull(parser -> {
            if (parser == null) {
                // No request body: fall back to building the query from URL parameters, if any.
                QueryBuilder queryBuilder = RestActions.urlParamsToQueryBuilder(request);
                if (queryBuilder != null) {
                    searchSourceBuilder.query(queryBuilder);
                }
            } else {
                searchSourceBuilder.query(RestActions.getQueryContent(parser));
            }
        });
    } catch (IOException e) {
        // Surface the checked IOException as Elasticsearch's unchecked base exception.
        throw new ElasticsearchException("Couldn't parse query", e);
    }
    return channel -> client.search(countRequest, new RestResponseListener<SearchResponse>(channel) {
        @Override
        public RestResponse buildResponse(SearchResponse countResponse) throws Exception {
            return RestTable.buildResponse(buildTable(request, countResponse), channel);
        }
    });
}
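The size(0) search is how a count is implemented here: no hits are fetched, only the total. For context, a minimal standalone sketch of the same idea issued directly through a client; the index name, query, and listener body are illustrative additions, not part of the original snippet:

// Hedged sketch: count documents via a size(0) search, assuming a Client named client is in scope.
SearchRequest countRequest = new SearchRequest("my-index"); // hypothetical index name
countRequest.source(new SearchSourceBuilder()
        .size(0) // return no hits, only the total count
        .query(QueryBuilders.matchAllQuery()));
client.search(countRequest, ActionListener.wrap(
        response -> System.out.println("count=" + response.getHits().getTotalHits()),
        e -> { throw new ElasticsearchException("count failed", e); }));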
Use of org.elasticsearch.ElasticsearchException in project elasticsearch by elastic.
From the class BestDocsDeferringCollector, method runDeferredAggs:
private void runDeferredAggs() throws IOException {
    List<ScoreDoc> allDocs = new ArrayList<>(shardSize);
    for (int i = 0; i < perBucketSamples.size(); i++) {
        PerParentBucketSamples perBucketSample = perBucketSamples.get(i);
        if (perBucketSample == null) {
            continue;
        }
        perBucketSample.getMatches(allDocs);
    }
    // Sort the top matches by docID for the benefit of deferred collector
    ScoreDoc[] docsArr = allDocs.toArray(new ScoreDoc[allDocs.size()]);
    Arrays.sort(docsArr, (o1, o2) -> {
        if (o1.doc == o2.doc) {
            return o1.shardIndex - o2.shardIndex;
        }
        return o1.doc - o2.doc;
    });
    try {
        for (PerSegmentCollects perSegDocs : entries) {
            perSegDocs.replayRelatedMatches(docsArr);
        }
    } catch (IOException e) {
        throw new ElasticsearchException("IOException collecting best scoring results", e);
    }
    deferred.postCollection();
}
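The replay step depends on docsArr being in docID order, with shardIndex as a tie-break. A self-contained sketch of that same ordering against plain Lucene ScoreDoc objects; the sample values are made up for illustration:

import org.apache.lucene.search.ScoreDoc;
import java.util.Arrays;
import java.util.Comparator;

public class ScoreDocSortSketch {
    public static void main(String[] args) {
        // ScoreDoc(doc, score, shardIndex); values are illustrative only.
        ScoreDoc[] docs = {
            new ScoreDoc(7, 1.0f, 1),
            new ScoreDoc(3, 2.0f, 1),
            new ScoreDoc(3, 0.5f, 0)
        };
        // Same ordering as runDeferredAggs: primary key docID, tie-break on shardIndex.
        Arrays.sort(docs, Comparator.<ScoreDoc>comparingInt(d -> d.doc)
                .thenComparingInt(d -> d.shardIndex));
        for (ScoreDoc d : docs) {
            System.out.println("doc=" + d.doc + " shard=" + d.shardIndex);
        }
        // Prints: doc=3 shard=0, doc=3 shard=1, doc=7 shard=1
    }
}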
Use of org.elasticsearch.ElasticsearchException in project elasticsearch by elastic.
From the class SuggestPhase, method execute:
@Override
public void execute(SearchContext context) {
    final SuggestionSearchContext suggest = context.suggest();
    if (suggest == null) {
        return;
    }
    try {
        CharsRefBuilder spare = new CharsRefBuilder();
        final List<Suggestion<? extends Entry<? extends Option>>> suggestions = new ArrayList<>(suggest.suggestions().size());
        for (Map.Entry<String, SuggestionSearchContext.SuggestionContext> entry : suggest.suggestions().entrySet()) {
            SuggestionSearchContext.SuggestionContext suggestion = entry.getValue();
            Suggester<SuggestionContext> suggester = suggestion.getSuggester();
            Suggestion<? extends Entry<? extends Option>> result = suggester.execute(entry.getKey(), suggestion, context.searcher(), spare);
            if (result != null) {
                assert entry.getKey().equals(result.name);
                suggestions.add(result);
            }
        }
        context.queryResult().suggest(new Suggest(suggestions));
    } catch (IOException e) {
        throw new ElasticsearchException("I/O exception during suggest phase", e);
    }
}
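For context, the SuggestionSearchContext consumed by this phase originates from a suggest section in the search request. A hedged sketch of building such a request client-side; the index name, suggestion name, field, and text are illustrative assumptions:

// Hedged sketch: a term suggester request whose server-side execution lands in SuggestPhase.
SearchRequest request = new SearchRequest("my-index"); // hypothetical index name
request.source(new SearchSourceBuilder()
        .suggest(new SuggestBuilder()
                .addSuggestion("title-fix", // hypothetical suggestion name
                        SuggestBuilders.termSuggestion("title").text("serach"))));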
Use of org.elasticsearch.ElasticsearchException in project elasticsearch by elastic.
From the class CompletionFieldStats, method completionStats:
/**
* Returns total in-heap bytes used by all suggesters. This method has CPU cost <code>O(numIndexedFields)</code>.
*
* @param fieldNamePatterns if non-null, any completion field name matching any of these patterns will break out its in-heap bytes
* separately in the returned {@link CompletionStats}
*/
public static CompletionStats completionStats(IndexReader indexReader, String... fieldNamePatterns) {
    long sizeInBytes = 0;
    ObjectLongHashMap<String> completionFields = null;
    if (fieldNamePatterns != null && fieldNamePatterns.length > 0) {
        completionFields = new ObjectLongHashMap<>(fieldNamePatterns.length);
    }
    for (LeafReaderContext atomicReaderContext : indexReader.leaves()) {
        LeafReader atomicReader = atomicReaderContext.reader();
        try {
            Fields fields = atomicReader.fields();
            for (String fieldName : fields) {
                Terms terms = fields.terms(fieldName);
                if (terms instanceof CompletionTerms) {
                    // TODO: currently we load up the suggester for reporting its size
                    long fstSize = ((CompletionTerms) terms).suggester().ramBytesUsed();
                    if (fieldNamePatterns != null && fieldNamePatterns.length > 0 && Regex.simpleMatch(fieldNamePatterns, fieldName)) {
                        completionFields.addTo(fieldName, fstSize);
                    }
                    sizeInBytes += fstSize;
                }
            }
        } catch (IOException ioe) {
            throw new ElasticsearchException(ioe);
        }
    }
    return new CompletionStats(sizeInBytes, completionFields == null ? null : new FieldMemoryStats(completionFields));
}
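A hedged usage sketch, assuming an open IndexReader named indexReader is in scope; the wildcard pattern is an illustrative assumption:

// Hedged sketch: total completion-suggester heap use, plus a per-field breakdown
// for any completion field matching "suggest*" (pattern is illustrative).
CompletionStats stats = CompletionFieldStats.completionStats(indexReader, "suggest*");
long totalBytes = stats.getSizeInBytes();
FieldMemoryStats perField = stats.getFields(); // null when no patterns were passed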
Use of org.elasticsearch.ElasticsearchException in project elasticsearch by elastic.
From the class BulkItemResponse, method fromXContent:
/**
* Reads a {@link BulkItemResponse} from a {@link XContentParser}.
*
* @param parser the {@link XContentParser}
* @param id the id to assign to the parsed {@link BulkItemResponse}. It is usually the index of
* the item in the {@link BulkResponse#getItems} array.
*/
public static BulkItemResponse fromXContent(XContentParser parser, int id) throws IOException {
    ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation);
    XContentParser.Token token = parser.nextToken();
    ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
    String currentFieldName = parser.currentName();
    token = parser.nextToken();
    // The single top-level field name is the operation type: index, create, update or delete.
    final OpType opType = OpType.fromString(currentFieldName);
    ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);
    DocWriteResponse.Builder builder = null;
    CheckedConsumer<XContentParser, IOException> itemParser = null;
    if (opType == OpType.INDEX || opType == OpType.CREATE) {
        final IndexResponse.Builder indexResponseBuilder = new IndexResponse.Builder();
        builder = indexResponseBuilder;
        itemParser = (indexParser) -> IndexResponse.parseXContentFields(indexParser, indexResponseBuilder);
    } else if (opType == OpType.UPDATE) {
        final UpdateResponse.Builder updateResponseBuilder = new UpdateResponse.Builder();
        builder = updateResponseBuilder;
        itemParser = (updateParser) -> UpdateResponse.parseXContentFields(updateParser, updateResponseBuilder);
    } else if (opType == OpType.DELETE) {
        final DeleteResponse.Builder deleteResponseBuilder = new DeleteResponse.Builder();
        builder = deleteResponseBuilder;
        itemParser = (deleteParser) -> DeleteResponse.parseXContentFields(deleteParser, deleteResponseBuilder);
    } else {
        throwUnknownField(currentFieldName, parser.getTokenLocation());
    }
    RestStatus status = null;
    ElasticsearchException exception = null;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        }
        if (ERROR.equals(currentFieldName)) {
            // Failed items carry the error as a nested object; parse it back into an exception.
            if (token == XContentParser.Token.START_OBJECT) {
                exception = ElasticsearchException.fromXContent(parser);
            }
        } else if (STATUS.equals(currentFieldName)) {
            if (token == XContentParser.Token.VALUE_NUMBER) {
                status = RestStatus.fromCode(parser.intValue());
            }
        } else {
            itemParser.accept(parser);
        }
    }
    ensureExpectedToken(XContentParser.Token.END_OBJECT, token, parser::getTokenLocation);
    token = parser.nextToken();
    ensureExpectedToken(XContentParser.Token.END_OBJECT, token, parser::getTokenLocation);
    BulkItemResponse bulkItemResponse;
    if (exception != null) {
        Failure failure = new Failure(builder.getShardId().getIndexName(), builder.getType(), builder.getId(), exception, status);
        bulkItemResponse = new BulkItemResponse(id, opType, failure);
    } else {
        bulkItemResponse = new BulkItemResponse(id, opType, builder.build());
    }
    return bulkItemResponse;
}
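A hedged round-trip sketch of calling fromXContent on raw JSON; the payload follows the shape of a successful index item, but its exact fields and values are illustrative, not taken from the original snippet:

// Hedged sketch: parse a single bulk item back from JSON (payload is illustrative).
String json = "{\"index\":{\"_index\":\"my-index\",\"_type\":\"doc\",\"_id\":\"1\","
        + "\"_version\":1,\"result\":\"created\",\"status\":201,"
        + "\"_shards\":{\"total\":2,\"successful\":1,\"failed\":0},\"created\":true}}";
try (XContentParser parser = XContentType.JSON.xContent()
        .createParser(NamedXContentRegistry.EMPTY, json)) {
    parser.nextToken(); // position the parser on START_OBJECT, as fromXContent expects
    BulkItemResponse item = BulkItemResponse.fromXContent(parser, 0);
    System.out.println(item.getOpType() + " -> " + item.status());
}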