Use of org.elasticsearch.index.mapper.FieldMapper in project elasticsearch by elastic.
From the class UnifiedHighlighter, method highlight:
@Override
public HighlightField highlight(HighlighterContext highlighterContext) {
    FieldMapper fieldMapper = highlighterContext.mapper;
    SearchContextHighlight.Field field = highlighterContext.field;
    SearchContext context = highlighterContext.context;
    FetchSubPhase.HitContext hitContext = highlighterContext.hitContext;
    if (!hitContext.cache().containsKey(CACHE_KEY)) {
        hitContext.cache().put(CACHE_KEY, new HighlighterEntry());
    }
    HighlighterEntry highlighterEntry = (HighlighterEntry) hitContext.cache().get(CACHE_KEY);
    MapperHighlighterEntry mapperHighlighterEntry = highlighterEntry.mappers.get(fieldMapper);
    if (mapperHighlighterEntry == null) {
        Encoder encoder = field.fieldOptions().encoder().equals("html")
            ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
        CustomPassageFormatter passageFormatter = new CustomPassageFormatter(
            field.fieldOptions().preTags()[0], field.fieldOptions().postTags()[0], encoder);
        mapperHighlighterEntry = new MapperHighlighterEntry(passageFormatter);
    }
    List<Snippet> snippets = new ArrayList<>();
    int numberOfFragments;
    try {
        Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().getType()).mappers().indexAnalyzer();
        List<Object> fieldValues = HighlightUtils.loadFieldValues(field, fieldMapper, context, hitContext);
        fieldValues = fieldValues.stream().map(obj -> {
            if (obj instanceof BytesRef) {
                return fieldMapper.fieldType().valueForDisplay(obj).toString();
            } else {
                return obj;
            }
        }).collect(Collectors.toList());
        IndexSearcher searcher = new IndexSearcher(hitContext.reader());
        CustomUnifiedHighlighter highlighter;
        if (field.fieldOptions().numberOfFragments() == 0) {
            // we use a control char to separate values, which is the only char that the custom break iterator
            // breaks the text on, so we don't lose the distinction between the different values of a field and we
            // get back a snippet per value
            String fieldValue = mergeFieldValues(fieldValues, MULTIVAL_SEP_CHAR);
            org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator breakIterator =
                new org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR);
            highlighter = new CustomUnifiedHighlighter(searcher, analyzer, mapperHighlighterEntry.passageFormatter,
                field.fieldOptions().boundaryScannerLocale(), breakIterator, fieldValue,
                field.fieldOptions().noMatchSize());
            // we are highlighting the whole content, one snippet per value
            numberOfFragments = fieldValues.size();
        } else {
            // using the paragraph separator we make sure that each field value holds a discrete passage for highlighting
            String fieldValue = mergeFieldValues(fieldValues, MULTIVAL_SEP_CHAR);
            BreakIterator bi = getBreakIterator(field);
            highlighter = new CustomUnifiedHighlighter(searcher, analyzer, mapperHighlighterEntry.passageFormatter,
                field.fieldOptions().boundaryScannerLocale(), bi, fieldValue,
                field.fieldOptions().noMatchSize());
            numberOfFragments = field.fieldOptions().numberOfFragments();
        }
        if (field.fieldOptions().requireFieldMatch()) {
            final String fieldName = highlighterContext.fieldName;
            highlighter.setFieldMatcher((name) -> fieldName.equals(name));
        } else {
            highlighter.setFieldMatcher((name) -> true);
        }
        Snippet[] fieldSnippets = highlighter.highlightField(highlighterContext.fieldName,
            highlighterContext.query, hitContext.docId(), numberOfFragments);
        for (Snippet fieldSnippet : fieldSnippets) {
            if (Strings.hasText(fieldSnippet.getText())) {
                snippets.add(fieldSnippet);
            }
        }
    } catch (IOException e) {
        throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
    }
    snippets = filterSnippets(snippets, field.fieldOptions().numberOfFragments());
    if (field.fieldOptions().scoreOrdered()) {
        // let's sort the snippets by score if needed
        CollectionUtil.introSort(snippets, (o1, o2) -> Double.compare(o2.getScore(), o1.getScore()));
    }
    String[] fragments = new String[snippets.size()];
    for (int i = 0; i < fragments.length; i++) {
        fragments[i] = snippets.get(i).getText();
    }
    if (fragments.length > 0) {
        return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
    }
    return null;
}
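This method is the server-side entry point of the unified highlighter. For orientation, here is a minimal client-side sketch that would exercise it, assuming the 5.x transport client API (the client variable, the "articles" index, and the "body" field are illustrative; check the builder methods against your client version):

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;

// Hypothetical usage: index and field names are illustrative.
SearchResponse response = client.prepareSearch("articles")
    .setQuery(QueryBuilders.matchQuery("body", "quick fox"))
    .highlighter(new HighlightBuilder()
        .field("body")
        .highlighterType("unified") // selects the UnifiedHighlighter shown above
        .numOfFragments(3))
    .get();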
Use of org.elasticsearch.index.mapper.FieldMapper in project elasticsearch by elastic.
From the class TransportGetFieldMappingsIndexAction, method findFieldMappingsByType:
private Map<String, FieldMappingMetaData> findFieldMappingsByType(DocumentMapper documentMapper, GetFieldMappingsIndexRequest request) {
    MapBuilder<String, FieldMappingMetaData> fieldMappings = new MapBuilder<>();
    final DocumentFieldMappers allFieldMappers = documentMapper.mappers();
    for (String field : request.fields()) {
        if (Regex.isMatchAllPattern(field)) {
            for (FieldMapper fieldMapper : allFieldMappers) {
                addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults());
            }
        } else if (Regex.isSimpleMatchPattern(field)) {
            for (FieldMapper fieldMapper : allFieldMappers) {
                if (Regex.simpleMatch(field, fieldMapper.fieldType().name())) {
                    addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults());
                }
            }
        } else {
            // not a pattern
            FieldMapper fieldMapper = allFieldMappers.smartNameFieldMapper(field);
            if (fieldMapper != null) {
                addFieldMapper(field, fieldMapper, fieldMappings, request.includeDefaults());
            } else if (request.probablySingleFieldRequest()) {
                fieldMappings.put(field, FieldMappingMetaData.NULL);
            }
        }
    }
    return fieldMappings.immutableMap();
}
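The three branches above handle the ways a requested field can be spelled: the match-all pattern, a simple wildcard, or a concrete name. A small illustration of the org.elasticsearch.common.regex.Regex helpers they rely on (the pattern and field values are illustrative):

import org.elasticsearch.common.regex.Regex;

boolean all = Regex.isMatchAllPattern("*");              // true: return mappings for every field
boolean wildcard = Regex.isSimpleMatchPattern("user.*"); // true: wildcard, checked against each field name
boolean hit = Regex.simpleMatch("user.*", "user.name");  // true: "user.name" matches the pattern
// Anything without a wildcard is treated as a concrete field name and looked up directly.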
Use of org.elasticsearch.index.mapper.FieldMapper in project elasticsearch by elastic.
From the class GeoContextMapping, method parseContext:
/**
 * Parse a set of {@link CharSequence} contexts at index-time.
 * Acceptable formats:
 *
 * <ul>
 *   <li>Array: <pre>[&lt;GEO POINT&gt;, ..]</pre></li>
 *   <li>String/Object/Array: <pre>"GEO POINT"</pre></li>
 * </ul>
 *
 * See {@link GeoUtils#parseGeoPoint(String, GeoPoint)} for the GEO POINT format.
 */
@Override
public Set<CharSequence> parseContext(ParseContext parseContext, XContentParser parser) throws IOException, ElasticsearchParseException {
    if (fieldName != null) {
        FieldMapper mapper = parseContext.docMapper().mappers().getMapper(fieldName);
        if (!(mapper instanceof GeoPointFieldMapper)) {
            throw new ElasticsearchParseException("referenced field must be mapped to geo_point");
        }
    }
    final Set<CharSequence> contexts = new HashSet<>();
    Token token = parser.currentToken();
    if (token == Token.START_ARRAY) {
        token = parser.nextToken();
        // Test if the value is a single point in [lon, lat] format
        if (token == Token.VALUE_NUMBER) {
            double lon = parser.doubleValue();
            if (parser.nextToken() == Token.VALUE_NUMBER) {
                double lat = parser.doubleValue();
                if (parser.nextToken() == Token.END_ARRAY) {
                    contexts.add(stringEncode(lon, lat, precision));
                } else {
                    throw new ElasticsearchParseException("only two values [lon, lat] expected");
                }
            } else {
                throw new ElasticsearchParseException("latitude must be a numeric value");
            }
        } else {
            while (token != Token.END_ARRAY) {
                GeoPoint point = GeoUtils.parseGeoPoint(parser);
                contexts.add(stringEncode(point.getLon(), point.getLat(), precision));
                token = parser.nextToken();
            }
        }
    } else if (token == Token.VALUE_STRING) {
        final String geoHash = parser.text();
        final CharSequence truncatedGeoHash = geoHash.subSequence(0, Math.min(geoHash.length(), precision));
        contexts.add(truncatedGeoHash);
    } else {
        // or a single location
        GeoPoint point = GeoUtils.parseGeoPoint(parser);
        contexts.add(stringEncode(point.getLon(), point.getLat(), precision));
    }
    return contexts;
}
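To make the accepted shapes concrete, here is a sketch of the inputs this parser handles, together with the geohash truncation performed in the string branch (all values and the precision are illustrative):

// Accepted inputs for a geo context (illustrative values):
//   [ -73.98, 40.74 ]                          -> a single [lon, lat] point
//   [ {"lat": 40.74, "lon": -73.98}, "dr5ru" ] -> an array of geo points
//   "dr5ru7"                                   -> a geohash string, truncated to `precision`
// With precision = 5, the string form is cut down before being stored:
String geoHash = "dr5ru7";
int precision = 5;
CharSequence context = geoHash.subSequence(0, Math.min(geoHash.length(), precision)); // "dr5ru"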
Use of org.elasticsearch.index.mapper.FieldMapper in project elasticsearch-skywalker by jprante.
From the class Skywalker, method getFieldInfo:
public Map<String, Object> getFieldInfo(MapperService mapperService, FieldInfo fi) {
    Map<String, Object> m = new HashMap<>();
    m.put("name", fi.name);
    m.put("number", fi.number);
    m.put("isIndexed", fi.isIndexed());
    m.put("hasDocValues", fi.hasDocValues());
    m.put("hasNorms", fi.hasNorms());
    m.put("hasPayloads", fi.hasPayloads());
    m.put("hasVectors", fi.hasVectors());
    if (fi.getDocValuesType() != null) {
        m.put("docValuesType", fi.getDocValuesType().name());
    }
    if (fi.getNormType() != null) {
        m.put("normType", fi.getNormType().name());
    }
    if (fi.getIndexOptions() != null) {
        m.put("options", fi.getIndexOptions().name());
    }
    m.put("attributes", fi.attributes());
    FieldMapper fieldMapper = mapperService.smartNameFieldMapper(fi.name);
    if (fieldMapper != null) {
        Map<String, Object> mapper = new HashMap<>();
        mapper.put("fullName", fieldMapper.names().fullName());
        mapper.put("indexName", fieldMapper.names().indexName());
        mapper.put("indexNameClean", fieldMapper.names().indexNameClean());
        mapper.put("boost", fieldMapper.boost());
        if (fieldMapper.indexAnalyzer() != null) {
            mapper.put("indexAnalyzer", fieldMapper.indexAnalyzer().toString());
        }
        if (fieldMapper.searchAnalyzer() != null) {
            mapper.put("searchAnalyzer", fieldMapper.searchAnalyzer().toString());
        }
        if (fieldMapper.searchQuoteAnalyzer() != null) {
            mapper.put("searchQuoteAnalyzer", fieldMapper.searchQuoteAnalyzer().toString());
        }
        FieldDataType dataType = fieldMapper.fieldDataType();
        if (dataType != null) {
            mapper.put("fieldDataType", dataType.getType());
        }
        FieldType type = fieldMapper.fieldType();
        if (type != null) {
            mapper.put("indexed", type.indexed());
            mapper.put("stored", type.stored());
            mapper.put("tokenized", type.tokenized());
            mapper.put("omitNorms", type.omitNorms());
            mapper.put("storeTermVectors", type.storeTermVectors());
            mapper.put("storeTermVectorOffsets", type.storeTermVectorOffsets());
            mapper.put("storeTermVectorPayloads", type.storeTermVectorPayloads());
            mapper.put("storeTermVectorPositions", type.storeTermVectorPositions());
            if (type.numericType() != null) {
                mapper.put("numericType", type.numericType().name());
                mapper.put("numericPrecisionStep", type.numericPrecisionStep());
            }
            if (type.docValueType() != null) {
                mapper.put("docValueType", type.docValueType().name());
            }
        }
        SimilarityProvider similarityProvider = fieldMapper.similarity();
        if (similarityProvider != null) {
            mapper.put("similarityProvider", similarityProvider.name());
            mapper.put("similarity", similarityProvider.get().getClass().getName());
        }
        PostingsFormatProvider postingsFormatProvider = fieldMapper.postingsFormatProvider();
        if (postingsFormatProvider != null) {
            mapper.put("postingsFormatProvider", postingsFormatProvider.name());
            mapper.put("postingsFormat", postingsFormatProvider.get().getName());
        }
        m.put("mapper", mapper);
    }
    return m;
}
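A hypothetical call site, iterating the field infos of a Lucene 4.x segment reader (the reader, skywalker, and mapperService variables are assumed to exist in scope):

import java.util.Map;
import org.apache.lucene.index.FieldInfo;

// Hypothetical usage: dump the collected metadata for every field of a segment.
for (FieldInfo fi : reader.getFieldInfos()) {
    Map<String, Object> info = skywalker.getFieldInfo(mapperService, fi);
    System.out.println(info.get("name") + " -> " + info.get("mapper"));
}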
Use of org.elasticsearch.index.mapper.FieldMapper in project elasticsearch-suggest-plugin by spinscale.
From the class AbstractCacheLoaderSuggester, method load:
@Override
public T load(ShardSuggestService.FieldType fieldType) throws Exception {
    MapperService.SmartNameFieldMappers fieldMappers = mapperService.smartName(fieldType.field(), fieldType.types());
    Analyzer queryAnalyzer = null;
    Analyzer indexAnalyzer = null;
    if (fieldMappers != null) {
        FieldMapper fieldMapper = mapperService.smartName(fieldType.field(), fieldType.types()).mapper();
        queryAnalyzer = fieldMapper.searchAnalyzer();
        // guard on the query analyzer, which is what this branch resolves and reports
        if (Strings.hasLength(fieldType.queryAnalyzer())) {
            NamedAnalyzer namedAnalyzer = analysisService.analyzer(fieldType.queryAnalyzer());
            if (namedAnalyzer == null) {
                throw new ElasticsearchException("Query analyzer[" + fieldType.queryAnalyzer() + "] does not exist.");
            }
            queryAnalyzer = namedAnalyzer.analyzer();
        }
        indexAnalyzer = fieldMapper.searchAnalyzer();
        if (Strings.hasLength(fieldType.indexAnalyzer())) {
            NamedAnalyzer namedAnalyzer = analysisService.analyzer(fieldType.indexAnalyzer());
            if (namedAnalyzer == null) {
                throw new ElasticsearchException("Index analyzer[" + fieldType.indexAnalyzer() + "] does not exist.");
            }
            indexAnalyzer = namedAnalyzer.analyzer();
        }
    }
    if (queryAnalyzer == null) {
        queryAnalyzer = new StandardAnalyzer(org.elasticsearch.Version.CURRENT.luceneVersion);
    }
    if (indexAnalyzer == null) {
        indexAnalyzer = new StandardAnalyzer(org.elasticsearch.Version.CURRENT.luceneVersion);
    }
    return getSuggester(indexAnalyzer, queryAnalyzer, fieldType);
}
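Because this class extends Guava's CacheLoader, it is presumably installed in a LoadingCache so each field/analyzer combination builds its suggester only once. A minimal sketch under that assumption (MySuggesterCacheLoader is a hypothetical concrete subclass, and Lookup stands in for the plugin's actual suggester type):

import java.util.concurrent.TimeUnit;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.LoadingCache;
import org.apache.lucene.search.suggest.Lookup;

// Hypothetical wiring: a cache miss calls load(fieldType) above to build the suggester.
LoadingCache<ShardSuggestService.FieldType, Lookup> suggesters = CacheBuilder.newBuilder()
    .expireAfterAccess(10, TimeUnit.MINUTES)
    .build(new MySuggesterCacheLoader(mapperService, analysisService));

Lookup lookup = suggesters.getUnchecked(fieldType); // built on first access, cached afterwards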