Use of org.elasticsearch.index.mapper.MapperService in project elasticsearch by elastic.
From the class TermVectorsService, method parseDocument:
private static ParsedDocument parseDocument(IndexShard indexShard, String index, String type, BytesReference doc,
                                            XContentType xContentType) {
    MapperService mapperService = indexShard.mapperService();
    DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(type);
    ParsedDocument parsedDocument =
        docMapper.getDocumentMapper().parse(source(index, type, "_id_for_tv_api", doc, xContentType));
    if (docMapper.getMapping() != null) {
        parsedDocument.addDynamicMappingsUpdate(docMapper.getMapping());
    }
    return parsedDocument;
}
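Note that the dynamic mapping update attached here lives only on the returned ParsedDocument; a caller that wants the dynamically created fields to stick has to merge the update back into the mapper service. A minimal sketch of that follow-up step, assuming the MapperService and ParsedDocument from above (the type name "my_type" is illustrative; the pattern mirrors the merge call in the DocumentMapperMergeTests snippet further down):

// Sketch: fold a dynamic mapping update produced during parsing back into
// the MapperService. "my_type" is a placeholder type name.
Mapping update = parsedDocument.dynamicMappingsUpdate();
if (update != null) {
    mapperService.merge("my_type", new CompressedXContent(update.toString()),
            MapperService.MergeReason.MAPPING_UPDATE, false);
}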
Use of org.elasticsearch.index.mapper.MapperService in project elasticsearch by elastic.
From the class PercolateQueryBuilder, method doToQuery:
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
    // Call nowInMillis() so that this query becomes un-cacheable, since we
    // can't be sure that it doesn't use now or scripts:
    context.nowInMillis();
    if (indexedDocumentIndex != null || indexedDocumentType != null || indexedDocumentId != null) {
        throw new IllegalStateException("query builder must be rewritten first");
    }
    if (document == null) {
        throw new IllegalStateException("no document to percolate");
    }
    MapperService mapperService = context.getMapperService();
    DocumentMapperForType docMapperForType = mapperService.documentMapperWithAutoCreate(documentType);
    DocumentMapper docMapper = docMapperForType.getDocumentMapper();
    ParsedDocument doc = docMapper.parse(source(context.index().getName(), documentType, "_temp_id", document,
            documentXContentType));
    FieldNameAnalyzer fieldNameAnalyzer = (FieldNameAnalyzer) docMapper.mappers().indexAnalyzer();
    // We need this custom impl because FieldNameAnalyzer is strict and the percolator sometimes isn't when
    // 'index.percolator.map_unmapped_fields_as_string' is enabled:
    Analyzer analyzer = new DelegatingAnalyzerWrapper(Analyzer.PER_FIELD_REUSE_STRATEGY) {
        @Override
        protected Analyzer getWrappedAnalyzer(String fieldName) {
            Analyzer analyzer = fieldNameAnalyzer.analyzers().get(fieldName);
            if (analyzer != null) {
                return analyzer;
            } else {
                return context.getIndexAnalyzers().getDefaultIndexAnalyzer();
            }
        }
    };
    final IndexSearcher docSearcher;
    if (doc.docs().size() > 1) {
        assert docMapper.hasNestedObjects();
        docSearcher = createMultiDocumentSearcher(analyzer, doc);
    } else {
        MemoryIndex memoryIndex = MemoryIndex.fromDocument(doc.rootDoc(), analyzer, true, false);
        docSearcher = memoryIndex.createSearcher();
        docSearcher.setQueryCache(null);
    }
    Version indexVersionCreated = context.getIndexSettings().getIndexVersionCreated();
    boolean mapUnmappedFieldsAsString =
        context.getIndexSettings().getValue(PercolatorFieldMapper.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING);
    // We have to make a copy of the QueryShardContext here so we can have an unfrozen version for parsing the
    // legacy percolator queries:
    QueryShardContext percolateShardContext = new QueryShardContext(context);
    MappedFieldType fieldType = context.fieldMapper(field);
    if (fieldType == null) {
        throw new QueryShardException(context, "field [" + field + "] does not exist");
    }
    if (!(fieldType instanceof PercolatorFieldMapper.FieldType)) {
        throw new QueryShardException(context, "expected field [" + field
            + "] to be of type [percolator], but is of type [" + fieldType.typeName() + "]");
    }
    PercolatorFieldMapper.FieldType pft = (PercolatorFieldMapper.FieldType) fieldType;
    PercolateQuery.QueryStore queryStore = createStore(pft, percolateShardContext, mapUnmappedFieldsAsString);
    return pft.percolateQuery(documentType, queryStore, document, docSearcher);
}
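For context, doToQuery() runs on the shard once a percolate query arrives through the search API. A hedged sketch of the client-side call that would reach this code path, assuming an ES 5.3+ Client, an index "my-index" that already has a percolator-typed field named "query", and an enclosing method that can throw IOException (all names here are illustrative assumptions, not taken from the snippet above):

// Sketch only: a percolate request from a client. "client", "my-index",
// "query", and "doctype" are illustrative assumptions.
BytesReference document = XContentFactory.jsonBuilder()
        .startObject()
        .field("message", "A new bonsai tree in the office")
        .endObject()
        .bytes();
SearchResponse response = client.prepareSearch("my-index")
        .setQuery(new PercolateQueryBuilder("query", "doctype", document, XContentType.JSON))
        .get();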
Use of org.elasticsearch.index.mapper.MapperService in project elasticsearch by elastic.
From the class CodecTests, method createCodecService:
private CodecService createCodecService() throws IOException {
    Settings nodeSettings = Settings.builder()
            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
            .build();
    IndexSettings settings = IndexSettingsModule.newIndexSettings("_na", nodeSettings);
    SimilarityService similarityService = new SimilarityService(settings, Collections.emptyMap());
    IndexAnalyzers indexAnalyzers = createTestAnalysis(settings, nodeSettings).indexAnalyzers;
    MapperRegistry mapperRegistry = new MapperRegistry(Collections.emptyMap(), Collections.emptyMap());
    MapperService service = new MapperService(settings, indexAnalyzers, xContentRegistry(), similarityService,
            mapperRegistry, () -> null);
    return new CodecService(service, ESLoggerFactory.getLogger("test"));
}
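A short usage sketch for this helper: resolving a codec by name is the main thing CodecService is for. The codec names below are the standard ones CodecService registers; the assertions are illustrative:

// Sketch: resolve named codecs through the service built by the helper above.
CodecService codecService = createCodecService();
assertNotNull(codecService.codec("default"));
assertNotNull(codecService.codec("best_compression"));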
Use of org.elasticsearch.index.mapper.MapperService in project elasticsearch by elastic.
From the class DocumentMapperMergeTests, method testConcurrentMergeTest:
public void testConcurrentMergeTest() throws Throwable {
    final MapperService mapperService = createIndex("test").mapperService();
    mapperService.merge("test", new CompressedXContent("{\"test\":{}}"),
            MapperService.MergeReason.MAPPING_UPDATE, false);
    final DocumentMapper documentMapper = mapperService.documentMapper("test");
    DocumentFieldMappers dfm = documentMapper.mappers();
    try {
        assertNotNull(dfm.indexAnalyzer().tokenStream("non_existing_field", "foo"));
        fail();
    } catch (IllegalArgumentException e) {
        // ok that's expected
    }
    final AtomicBoolean stopped = new AtomicBoolean(false);
    final CyclicBarrier barrier = new CyclicBarrier(2);
    final AtomicReference<String> lastIntroducedFieldName = new AtomicReference<>();
    final AtomicReference<Exception> error = new AtomicReference<>();
    final Thread updater = new Thread() {
        @Override
        public void run() {
            try {
                barrier.await();
                for (int i = 0; i < 200 && stopped.get() == false; i++) {
                    final String fieldName = Integer.toString(i);
                    ParsedDocument doc = documentMapper.parse("test", "test", fieldName,
                            new BytesArray("{ \"" + fieldName + "\" : \"test\" }"));
                    Mapping update = doc.dynamicMappingsUpdate();
                    assert update != null;
                    lastIntroducedFieldName.set(fieldName);
                    mapperService.merge("test", new CompressedXContent(update.toString()),
                            MapperService.MergeReason.MAPPING_UPDATE, false);
                }
            } catch (Exception e) {
                error.set(e);
            } finally {
                stopped.set(true);
            }
        }
    };
    updater.start();
    try {
        barrier.await();
        while (stopped.get() == false) {
            final String fieldName = lastIntroducedFieldName.get();
            final BytesReference source = new BytesArray("{ \"" + fieldName + "\" : \"test\" }");
            ParsedDocument parsedDoc = documentMapper.parse("test", "test", "random", source);
            if (parsedDoc.dynamicMappingsUpdate() != null) {
                // not in the mapping yet, try again
                continue;
            }
            dfm = documentMapper.mappers();
            assertNotNull(dfm.indexAnalyzer().tokenStream(fieldName, "foo"));
        }
    } finally {
        stopped.set(true);
        updater.join();
    }
    if (error.get() != null) {
        throw error.get();
    }
}
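The thread coordination in this test is plain java.util.concurrent: a CyclicBarrier releases the updater and the main thread at the same instant, an AtomicReference publishes the updater's progress, and an AtomicBoolean ends the run. A self-contained skeleton of just that pattern, with the Elasticsearch specifics stripped out:

import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

// Stripped-down skeleton of the test's coordination pattern.
public class WriterReaderSkeleton {
    public static void main(String[] args) throws Exception {
        AtomicBoolean stopped = new AtomicBoolean(false);
        CyclicBarrier barrier = new CyclicBarrier(2);
        AtomicReference<String> lastPublished = new AtomicReference<>();

        Thread writer = new Thread(() -> {
            try {
                barrier.await(); // start in lock-step with the reader
                for (int i = 0; i < 200 && stopped.get() == false; i++) {
                    lastPublished.set(Integer.toString(i)); // publish progress
                }
            } catch (Exception e) {
                throw new RuntimeException(e);
            } finally {
                stopped.set(true); // tell the reader we are done
            }
        });
        writer.start();

        barrier.await(); // released together with the writer
        String lastSeen = null;
        while (stopped.get() == false) {
            String current = lastPublished.get();
            if (current != null && !current.equals(lastSeen)) {
                lastSeen = current; // observed a newly published value
            }
        }
        writer.join();
        System.out.println("last value observed: " + lastSeen);
    }
}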
Use of org.elasticsearch.index.mapper.MapperService in project elasticsearch by elastic.
From the class DocumentMapperMergeTests, method testDoNotRepeatOriginalMapping:
public void testDoNotRepeatOriginalMapping() throws IOException {
    CompressedXContent mapping = new CompressedXContent(XContentFactory.jsonBuilder()
            .startObject().startObject("type")
                .startObject("_source")
                    .field("enabled", false)
                .endObject()
            .endObject().endObject().bytes());
    MapperService mapperService = createIndex("test").mapperService();
    mapperService.merge("type", mapping, MapperService.MergeReason.MAPPING_UPDATE, false);
    CompressedXContent update = new CompressedXContent(XContentFactory.jsonBuilder()
            .startObject().startObject("type")
                .startObject("properties")
                    .startObject("foo")
                        .field("type", "text")
                    .endObject()
                .endObject()
            .endObject().endObject().bytes());
    DocumentMapper mapper = mapperService.merge("type", update, MapperService.MergeReason.MAPPING_UPDATE, false);
    assertNotNull(mapper.mappers().getMapper("foo"));
    assertFalse(mapper.sourceMapper().enabled());
}
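For readability, the two mappings above hand-transcribed from the XContentFactory builders into JSON literals (a transcription, not output captured from the test); the same merge calls could be made with these:

// Hand-transcribed JSON equivalents of the builder-produced mappings above.
CompressedXContent mapping = new CompressedXContent(
        "{\"type\":{\"_source\":{\"enabled\":false}}}");
CompressedXContent update = new CompressedXContent(
        "{\"type\":{\"properties\":{\"foo\":{\"type\":\"text\"}}}}");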