Use of java.util.HashMap in project elasticsearch by elastic.
The class PluginsService, method updatedSettings.
public Settings updatedSettings() {
    Map<String, String> foundSettings = new HashMap<>();
    final Settings.Builder builder = Settings.builder();
    for (Tuple<PluginInfo, Plugin> plugin : plugins) {
        Settings settings = plugin.v2().additionalSettings();
        for (String setting : settings.getAsMap().keySet()) {
            String oldPlugin = foundSettings.put(setting, plugin.v1().getName());
            if (oldPlugin != null) {
                throw new IllegalArgumentException("Cannot have additional setting [" + setting + "] " +
                    "in plugin [" + plugin.v1().getName() + "], already added in plugin [" + oldPlugin + "]");
            }
        }
        builder.put(settings);
    }
    return builder.put(this.settings).build();
}
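The HashMap idiom to note: put returns the value previously mapped to the key, which lets the loop detect a setting claimed by two plugins in a single operation. A minimal runnable sketch of the same idiom, with made-up setting and plugin names:

import java.util.HashMap;
import java.util.Map;

// Minimal sketch of the duplicate-detection idiom above: HashMap.put returns the
// previous value for the key, so ownership conflicts surface in a single call.
// The setting and plugin names are made up for illustration.
public class DuplicateSettingCheck {
    public static void main(String[] args) {
        Map<String, String> settingToPlugin = new HashMap<>();
        String[][] additions = { { "index.codec", "plugin-a" }, { "index.codec", "plugin-b" } };
        for (String[] addition : additions) {
            // put returns the previous mapping, or null if the key was absent
            String oldPlugin = settingToPlugin.put(addition[0], addition[1]);
            if (oldPlugin != null) {
                System.out.println("Cannot have additional setting [" + addition[0] + "] in plugin ["
                    + addition[1] + "], already added in plugin [" + oldPlugin + "]");
            }
        }
    }
}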
Use of java.util.HashMap in project elasticsearch by elastic.
The class RepositoriesService, method registerRepository.
/**
 * Creates a new repository and adds it to the list of registered repositories.
 * <p>
 * If a repository with the same name but a different type or settings already exists, it will be closed and
 * replaced with the new repository. If a repository with the same name, type, and settings already exists,
 * the new repository is ignored.
 *
 * @param repositoryMetaData new repository metadata
 * @return {@code true} if the new repository was added or {@code false} if it was ignored
 */
private boolean registerRepository(RepositoryMetaData repositoryMetaData) throws IOException {
    Repository previous = repositories.get(repositoryMetaData.name());
    if (previous != null) {
        RepositoryMetaData previousMetadata = previous.getMetadata();
        if (previousMetadata.type().equals(repositoryMetaData.type())
                && previousMetadata.settings().equals(repositoryMetaData.settings())) {
            // Previous version is the same as this one - ignore it
            return false;
        }
    }
    Repository newRepo = createRepository(repositoryMetaData);
    if (previous != null) {
        closeRepository(previous);
    }
    Map<String, Repository> newRepositories = new HashMap<>(repositories);
    newRepositories.put(repositoryMetaData.name(), newRepo);
    repositories = newRepositories;
    return true;
}
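The HashMap usage to note is the copy-on-write update at the end: the current map is copied, the copy is mutated, and the field is reassigned, so concurrent readers always see a complete snapshot. A minimal sketch of the pattern, assuming a volatile field and synchronized writers (the actual field modifiers in RepositoriesService are not shown here):

import java.util.HashMap;
import java.util.Map;

// Sketch of a copy-on-write map registry: readers never see a partially
// updated map because the field is swapped atomically to a fresh copy.
// The Registry class and String values are stand-ins for illustration.
public class Registry {
    private volatile Map<String, String> entries = new HashMap<>();

    public String get(String name) {
        return entries.get(name);          // no lock needed for reads
    }

    public synchronized void register(String name, String value) {
        Map<String, String> copy = new HashMap<>(entries); // copy current state
        copy.put(name, value);                             // mutate the copy
        entries = copy;                                    // publish atomically
    }
}

This trades copying cost on writes for lock-free reads, which fits registries that are read often and updated rarely.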
Use of java.util.HashMap in project elasticsearch by elastic.
The class PlainHighlighter, method highlight.
@Override
public HighlightField highlight(HighlighterContext highlighterContext) {
    SearchContextHighlight.Field field = highlighterContext.field;
    SearchContext context = highlighterContext.context;
    FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
    FieldMapper mapper = highlighterContext.mapper;
    Encoder encoder = field.fieldOptions().encoder().equals("html") ?
        HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
    if (!hitContext.cache().containsKey(CACHE_KEY)) {
        Map<FieldMapper, org.apache.lucene.search.highlight.Highlighter> mappers = new HashMap<>();
        hitContext.cache().put(CACHE_KEY, mappers);
    }
    @SuppressWarnings("unchecked")
    Map<FieldMapper, org.apache.lucene.search.highlight.Highlighter> cache =
        (Map<FieldMapper, org.apache.lucene.search.highlight.Highlighter>) hitContext.cache().get(CACHE_KEY);
    org.apache.lucene.search.highlight.Highlighter entry = cache.get(mapper);
    if (entry == null) {
        QueryScorer queryScorer = new CustomQueryScorer(highlighterContext.query,
            field.fieldOptions().requireFieldMatch() ? mapper.fieldType().name() : null);
        queryScorer.setExpandMultiTermQuery(true);
        Fragmenter fragmenter;
        if (field.fieldOptions().numberOfFragments() == 0) {
            fragmenter = new NullFragmenter();
        } else if (field.fieldOptions().fragmenter() == null) {
            fragmenter = new SimpleSpanFragmenter(queryScorer, field.fieldOptions().fragmentCharSize());
        } else if ("simple".equals(field.fieldOptions().fragmenter())) {
            fragmenter = new SimpleFragmenter(field.fieldOptions().fragmentCharSize());
        } else if ("span".equals(field.fieldOptions().fragmenter())) {
            fragmenter = new SimpleSpanFragmenter(queryScorer, field.fieldOptions().fragmentCharSize());
        } else {
            throw new IllegalArgumentException("unknown fragmenter option [" + field.fieldOptions().fragmenter() +
                "] for the field [" + highlighterContext.fieldName + "]");
        }
        Formatter formatter = new SimpleHTMLFormatter(field.fieldOptions().preTags()[0], field.fieldOptions().postTags()[0]);
        entry = new org.apache.lucene.search.highlight.Highlighter(formatter, encoder, queryScorer);
        entry.setTextFragmenter(fragmenter);
        // always highlight across all data
        entry.setMaxDocCharsToAnalyze(Integer.MAX_VALUE);
        cache.put(mapper, entry);
    }
    // a HACK to make the highlighter do highlighting, even though it is using the single frag list builder
    int numberOfFragments = field.fieldOptions().numberOfFragments() == 0 ? 1 : field.fieldOptions().numberOfFragments();
    ArrayList<TextFragment> fragsList = new ArrayList<>();
    List<Object> textsToHighlight;
    Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().getType()).mappers().indexAnalyzer();
    try {
        textsToHighlight = HighlightUtils.loadFieldValues(field, mapper, context, hitContext);
        for (Object textToHighlight : textsToHighlight) {
            String text;
            if (textToHighlight instanceof BytesRef) {
                text = mapper.fieldType().valueForDisplay(textToHighlight).toString();
            } else {
                text = textToHighlight.toString();
            }
            try (TokenStream tokenStream = analyzer.tokenStream(mapper.fieldType().name(), text)) {
                if (!tokenStream.hasAttribute(CharTermAttribute.class) || !tokenStream.hasAttribute(OffsetAttribute.class)) {
                    // can't perform highlighting if the stream has no terms (binary token stream) or no offsets
                    continue;
                }
                TextFragment[] bestTextFragments = entry.getBestTextFragments(tokenStream, text, false, numberOfFragments);
                for (TextFragment bestTextFragment : bestTextFragments) {
                    if (bestTextFragment != null && bestTextFragment.getScore() > 0) {
                        fragsList.add(bestTextFragment);
                    }
                }
            }
        }
    } catch (Exception e) {
        if (ExceptionsHelper.unwrap(e, BytesRefHash.MaxBytesLengthExceededException.class) != null) {
            // this can happen when a field is skipped at index time (e.g. not_analyzed with ignore_above),
            // but the huge term is still in the source: the plain highlighter will parse the source and
            // try to analyze it, exceeding the term length limit.
            return null;
        } else {
            throw new FetchPhaseExecutionException(context,
                "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
        }
    }
    if (field.fieldOptions().scoreOrdered()) {
        CollectionUtil.introSort(fragsList, new Comparator<TextFragment>() {
            @Override
            public int compare(TextFragment o1, TextFragment o2) {
                return Math.round(o2.getScore() - o1.getScore());
            }
        });
    }
    String[] fragments;
    // number_of_fragments is set to 0 but we have a multivalued field
    if (field.fieldOptions().numberOfFragments() == 0 && textsToHighlight.size() > 1 && fragsList.size() > 0) {
        fragments = new String[fragsList.size()];
        for (int i = 0; i < fragsList.size(); i++) {
            fragments[i] = fragsList.get(i).toString();
        }
    } else {
        // refine numberOfFragments if needed
        numberOfFragments = Math.min(fragsList.size(), numberOfFragments);
        fragments = new String[numberOfFragments];
        for (int i = 0; i < fragments.length; i++) {
            fragments[i] = fragsList.get(i).toString();
        }
    }
    if (fragments.length > 0) {
        return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
    }
    int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize();
    if (noMatchSize > 0 && textsToHighlight.size() > 0) {
        // Pull an excerpt from the beginning of the string but make sure to split the string on a term boundary.
        String fieldContents = textsToHighlight.get(0).toString();
        int end;
        try {
            end = findGoodEndForNoHighlightExcerpt(noMatchSize, analyzer, mapper.fieldType().name(), fieldContents);
        } catch (Exception e) {
            throw new FetchPhaseExecutionException(context,
                "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
        }
        if (end > 0) {
            return new HighlightField(highlighterContext.fieldName, new Text[] { new Text(fieldContents.substring(0, end)) });
        }
    }
    return null;
}
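The HashMap here acts as a per-hit cache keyed by field mapper, so one Lucene Highlighter is built per field and reused across values. On Java 8+ the same lazy-initialization pattern can be written with Map.computeIfAbsent; a generic sketch, with the expensive Highlighter construction stood in by an arbitrary factory function:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

// Generic sketch of the lazy per-key cache pattern used above.
// The factory stands in for the Highlighter construction.
public class PerKeyCache<K, V> {
    private final Map<K, V> cache = new HashMap<>();
    private final Function<K, V> factory;

    public PerKeyCache(Function<K, V> factory) {
        this.factory = factory;
    }

    public V get(K key) {
        // computeIfAbsent builds the value once and caches it for later lookups
        return cache.computeIfAbsent(key, factory);
    }
}

The original code keeps the explicit null check instead, which reads the same way and predates reliance on computeIfAbsent.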
Use of java.util.HashMap in project elasticsearch by elastic.
The class FetchPhase, method createSearchHit.
private SearchHit createSearchHit(SearchContext context, FieldsVisitor fieldsVisitor, int docId, int subDocId,
                                  LeafReaderContext subReaderContext) {
    if (fieldsVisitor == null) {
        return new SearchHit(docId);
    }
    loadStoredFields(context, subReaderContext, fieldsVisitor, subDocId);
    fieldsVisitor.postProcess(context.mapperService());
    Map<String, SearchHitField> searchFields = null;
    if (!fieldsVisitor.fields().isEmpty()) {
        searchFields = new HashMap<>(fieldsVisitor.fields().size());
        for (Map.Entry<String, List<Object>> entry : fieldsVisitor.fields().entrySet()) {
            searchFields.put(entry.getKey(), new SearchHitField(entry.getKey(), entry.getValue()));
        }
    }
    DocumentMapper documentMapper = context.mapperService().documentMapper(fieldsVisitor.uid().type());
    Text typeText;
    if (documentMapper == null) {
        typeText = new Text(fieldsVisitor.uid().type());
    } else {
        typeText = documentMapper.typeText();
    }
    SearchHit searchHit = new SearchHit(docId, fieldsVisitor.uid().id(), typeText, searchFields);
    // Set _source if requested.
    SourceLookup sourceLookup = context.lookup().source();
    sourceLookup.setSegmentAndDocument(subReaderContext, subDocId);
    if (fieldsVisitor.source() != null) {
        sourceLookup.setSource(fieldsVisitor.source());
    }
    return searchHit;
}
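Note the pre-sized map: new HashMap<>(fieldsVisitor.fields().size()) hints at the final size up front. Strictly, the constructor argument is an initial capacity rather than an expected entry count, so a map sized exactly to n entries can still resize once; a small sketch of the distinction:

import java.util.HashMap;
import java.util.Map;

public class PresizeDemo {
    public static void main(String[] args) {
        int expectedEntries = 100;
        // The constructor argument is an initial capacity (rounded up to a power of two),
        // not an expected entry count. With the default 0.75 load factor the table resizes
        // once size exceeds capacity * 0.75, so capacity equal to n may still resize once.
        Map<String, Integer> tight = new HashMap<>(expectedEntries);
        // Sizing capacity to expected / 0.75 (plus one) guarantees no resize for n entries.
        Map<String, Integer> roomy = new HashMap<>((int) (expectedEntries / 0.75f) + 1);
        for (int i = 0; i < expectedEntries; i++) {
            tight.put("key" + i, i);
            roomy.put("key" + i, i);
        }
    }
}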
Use of java.util.HashMap in project elasticsearch by elastic.
The class FetchPhase, method createNestedSearchHit.
private SearchHit createNestedSearchHit(SearchContext context, int nestedTopDocId, int nestedSubDocId, int rootSubDocId,
                                        Set<String> fieldNames, List<String> fieldNamePatterns,
                                        LeafReaderContext subReaderContext) throws IOException {
    // If highlighting is requested on nested documents we need to fetch the _source from the root document,
    // otherwise highlighting will attempt to fetch the _source from the nested doc, which will fail,
    // because the entire _source is only stored with the root document.
    final FieldsVisitor rootFieldsVisitor = new FieldsVisitor(context.sourceRequested() || context.highlight() != null);
    loadStoredFields(context, subReaderContext, rootFieldsVisitor, rootSubDocId);
    rootFieldsVisitor.postProcess(context.mapperService());
    Map<String, SearchHitField> searchFields =
        getSearchFields(context, nestedSubDocId, fieldNames, fieldNamePatterns, subReaderContext);
    DocumentMapper documentMapper = context.mapperService().documentMapper(rootFieldsVisitor.uid().type());
    SourceLookup sourceLookup = context.lookup().source();
    sourceLookup.setSegmentAndDocument(subReaderContext, nestedSubDocId);
    ObjectMapper nestedObjectMapper = documentMapper.findNestedObjectMapper(nestedSubDocId, context, subReaderContext);
    assert nestedObjectMapper != null;
    SearchHit.NestedIdentity nestedIdentity =
        getInternalNestedIdentity(context, nestedSubDocId, subReaderContext, documentMapper, nestedObjectMapper);
    BytesReference source = rootFieldsVisitor.source();
    if (source != null) {
        Tuple<XContentType, Map<String, Object>> tuple = XContentHelper.convertToMap(source, true);
        Map<String, Object> sourceAsMap = tuple.v2();
        // Isolate the nested json array object that matches the nested hit and wrap it back into the same json
        // structure, with the nested json array object as the actual content. The latter is important so that
        // features like source filtering and highlighting work consistently, regardless of whether the field
        // points to a single object or an array of objects.
        Map<String, Object> nestedSourceAsMap = new HashMap<>();
        Map<String, Object> current = nestedSourceAsMap;
        for (SearchHit.NestedIdentity nested = nestedIdentity; nested != null; nested = nested.getChild()) {
            String nestedPath = nested.getField().string();
            current.put(nestedPath, new HashMap<>());
            Object extractedValue = XContentMapValues.extractValue(nestedPath, sourceAsMap);
            List<Map<String, Object>> nestedParsedSource;
            if (extractedValue instanceof List) {
                // nested field has an array value in the _source
                nestedParsedSource = (List<Map<String, Object>>) extractedValue;
            } else if (extractedValue instanceof Map) {
                // nested field has an object value in the _source: it has just one inner object,
                // which is valid, but uncommon.
                nestedParsedSource = Collections.singletonList((Map<String, Object>) extractedValue);
            } else {
                throw new IllegalStateException("extracted source isn't an object or an array");
            }
            sourceAsMap = nestedParsedSource.get(nested.getOffset());
            if (nested.getChild() == null) {
                current.put(nestedPath, sourceAsMap);
            } else {
                Map<String, Object> next = new HashMap<>();
                current.put(nestedPath, next);
                current = next;
            }
        }
        context.lookup().source().setSource(nestedSourceAsMap);
        XContentType contentType = tuple.v1();
        BytesReference nestedSource = contentBuilder(contentType).map(sourceAsMap).bytes();
        context.lookup().source().setSource(nestedSource);
        context.lookup().source().setSourceContentType(contentType);
    }
    return new SearchHit(nestedTopDocId, rootFieldsVisitor.uid().id(), documentMapper.typeText(), nestedIdentity, searchFields);
}
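The loop above chains HashMaps to rebuild the nested path around the isolated inner object, so the hit's _source keeps its original field structure. A stripped-down sketch of the same wrapping technique, with a made-up path and leaf value:

import java.util.HashMap;
import java.util.Map;

public class NestedWrap {
    // Wraps a leaf value in nested singleton maps, one level per path element,
    // e.g. ["comments", "replies"] -> {comments={replies=<leaf>}}.
    static Map<String, Object> wrap(String[] path, Object leaf) {
        Map<String, Object> root = new HashMap<>();
        Map<String, Object> current = root;
        for (int i = 0; i < path.length; i++) {
            if (i == path.length - 1) {
                current.put(path[i], leaf);        // innermost level holds the actual content
            } else {
                Map<String, Object> next = new HashMap<>();
                current.put(path[i], next);        // intermediate level: another wrapper map
                current = next;
            }
        }
        return root;
    }

    public static void main(String[] args) {
        System.out.println(wrap(new String[] { "comments", "replies" }, "hello"));
        // prints {comments={replies=hello}}
    }
}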