Use of org.opensearch.index.mapper.DocumentMapper in project OpenSearch by opensearch-project.
From class RareClusterStateIT, method testDelayedMappingPropagationOnReplica:
public void testDelayedMappingPropagationOnReplica() throws Exception {
    // This is essentially the same thing as testDelayedMappingPropagationOnPrimary,
    // but for replicas.
    // Here we want to test that everything goes well if the mappings that
    // are needed for a document are not available on the replica at the
    // time of indexing it.
    final List<String> nodeNames = internalCluster().startNodes(2);
    assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
    final String master = internalCluster().getMasterName();
    assertThat(nodeNames, hasItem(master));
    String otherNode = null;
    for (String node : nodeNames) {
        if (node.equals(master) == false) {
            otherNode = node;
            break;
        }
    }
    assertNotNull(otherNode);
    // Force allocation of the primary on the master node by first only allocating on the master
    // and then allowing all nodes, so that the replica gets allocated on the other node.
    prepareCreate("index").setSettings(
        Settings.builder()
            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
            .put("index.routing.allocation.include._name", master)
    ).get();
    client().admin()
        .indices()
        .prepareUpdateSettings("index")
        .setSettings(Settings.builder().put("index.routing.allocation.include._name", ""))
        .get();
    ensureGreen();
    // Check routing tables
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    assertEquals(master, state.nodes().getMasterNode().getName());
    List<ShardRouting> shards = state.routingTable().allShards("index");
    assertThat(shards, hasSize(2));
    for (ShardRouting shard : shards) {
        if (shard.primary()) {
            // the primary must be on the master
            assertEquals(state.nodes().getMasterNodeId(), shard.currentNodeId());
        } else {
            assertTrue(shard.active());
        }
    }
    // Block cluster state processing on the replica
    BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(otherNode, random());
    internalCluster().setDisruptionScheme(disruption);
    disruption.startDisrupting();
    final ActionFuture<AcknowledgedResponse> putMappingResponse = executeAndCancelCommittedPublication(
        client().admin().indices().preparePutMapping("index").setSource("field", "type=long")
    );
    final Index index = resolveIndex("index");
    // Wait for mappings to be available on the master
    assertBusy(() -> {
        final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master);
        final IndexService indexService = indicesService.indexServiceSafe(index);
        assertNotNull(indexService);
        final MapperService mapperService = indexService.mapperService();
        DocumentMapper mapper = mapperService.documentMapper(MapperService.SINGLE_MAPPING_NAME);
        assertNotNull(mapper);
        assertNotNull(mapper.mappers().getMapper("field"));
    });
    final ActionFuture<IndexResponse> docIndexResponse = client().prepareIndex("index").setId("1").setSource("field", 42).execute();
    assertBusy(() -> assertTrue(client().prepareGet("index", "1").get().isExists()));
    // Index another document, this time using dynamic mappings.
    // The ack timeout of 0 on dynamic mapping updates makes it possible for the document to be indexed on the primary, even
    // if the dynamic mapping update is not applied on the replica yet.
    // This request does not change the cluster state on its own; because the mapping is dynamic,
    // it triggers a mapping update publication that we need to await and cancel.
    ActionFuture<IndexResponse> dynamicMappingsFut = executeAndCancelCommittedPublication(
        client().prepareIndex("index").setId("2").setSource("field2", 42)
    );
    // ...and wait for the second mapping to be available on the master
    assertBusy(() -> {
        final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master);
        final IndexService indexService = indicesService.indexServiceSafe(index);
        assertNotNull(indexService);
        final MapperService mapperService = indexService.mapperService();
        DocumentMapper mapper = mapperService.documentMapper(MapperService.SINGLE_MAPPING_NAME);
        assertNotNull(mapper);
        assertNotNull(mapper.mappers().getMapper("field2"));
    });
    assertBusy(() -> assertTrue(client().prepareGet("index", "2").get().isExists()));
    // The mappings have not been propagated to the replica yet, so the document cannot be indexed there.
    // We wait on purpose, to make sure that the document is not indexed because the shard operation is stalled,
    // and not just because it takes time to replicate the indexing request to the replica.
    Thread.sleep(100);
    assertFalse(putMappingResponse.isDone());
    assertFalse(docIndexResponse.isDone());
    // Now make sure the indexing request finishes successfully
    disruption.stopDisrupting();
    assertTrue(putMappingResponse.get(10, TimeUnit.SECONDS).isAcknowledged());
    assertThat(docIndexResponse.get(10, TimeUnit.SECONDS), instanceOf(IndexResponse.class));
    // both shards should have succeeded
    assertEquals(2, docIndexResponse.get(10, TimeUnit.SECONDS).getShardInfo().getTotal());
    assertThat(dynamicMappingsFut.get(10, TimeUnit.SECONDS).getResult(), equalTo(CREATED));
}
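The test leans on a helper, executeAndCancelCommittedPublication, that is not shown on this page. A minimal sketch of what such a helper can look like, assuming the Coordinator methods publicationInProgress() and cancelCommittedPublication(); the body below is inferred from how the test uses it, not the verbatim upstream helper:

// Sketch only: submit the request, then cancel the committed cluster state
// publication on the master, so the resulting state is applied on the master
// but never delivered to the other nodes.
private <Req extends ActionRequest, Res extends ActionResponse> ActionFuture<Res> executeAndCancelCommittedPublication(
        ActionRequestBuilder<Req, Res> req) throws Exception {
    final Coordinator master = (Coordinator) internalCluster().getCurrentMasterNodeInstance(Discovery.class);
    // Wait until no other publication is in flight, so we cancel the right one.
    assertBusy(() -> assertFalse(master.publicationInProgress()));
    final ActionFuture<Res> future = req.execute();
    // Cancel the publication triggered by this request once it is committed.
    assertBusy(() -> assertTrue(master.cancelCommittedPublication()));
    return future;
}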
Use of org.opensearch.index.mapper.DocumentMapper in project OpenSearch by opensearch-project.
From class SizeMappingTests, method testSizeNotSet:
public void testSizeNotSet() throws Exception {
    IndexService service = createIndex("test", Settings.EMPTY, "type");
    DocumentMapper docMapper = service.mapperService().documentMapper("type");
    BytesReference source = BytesReference.bytes(
        XContentFactory.jsonBuilder().startObject().field("field", "value").endObject());
    ParsedDocument doc = docMapper.parse(new SourceToParse("test", "type", "1", source, XContentType.JSON));
    assertThat(doc.rootDoc().getField("_size"), nullValue());
}
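For contrast, the enabled case follows the same pattern. A minimal sketch, assuming the _size meta field is switched on via the mapper-size plugin's enabled=true mapping shorthand:

// Sketch: with "_size" enabled in the mapping, parsing the same source
// should now populate the _size field on the root document.
IndexService service = createIndex("test", Settings.EMPTY, "type", "_size", "enabled=true");
DocumentMapper docMapper = service.mapperService().documentMapper("type");
BytesReference source = BytesReference.bytes(
    XContentFactory.jsonBuilder().startObject().field("field", "value").endObject());
ParsedDocument doc = docMapper.parse(new SourceToParse("test", "type", "1", source, XContentType.JSON));
assertThat(doc.rootDoc().getField("_size"), notNullValue());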
Use of org.opensearch.index.mapper.DocumentMapper in project OpenSearch by opensearch-project.
From class PercolateQueryBuilder, method doToQuery:
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
    if (context.allowExpensiveQueries() == false) {
        throw new OpenSearchException(
            "[percolate] queries cannot be executed when '" + ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to false.");
    }
    // Call nowInMillis() so that this query becomes un-cacheable, since we
    // can't be sure that it doesn't use now or scripts
    context.nowInMillis();
    if (indexedDocumentIndex != null || indexedDocumentId != null || documentSupplier != null) {
        throw new IllegalStateException("query builder must be rewritten first");
    }
    if (documents.isEmpty()) {
        throw new IllegalStateException("no document to percolate");
    }
    MappedFieldType fieldType = context.fieldMapper(field);
    if (fieldType == null) {
        throw new QueryShardException(context, "field [" + field + "] does not exist");
    }
    if (!(fieldType instanceof PercolatorFieldMapper.PercolatorFieldType)) {
        throw new QueryShardException(
            context,
            "expected field [" + field + "] to be of type [percolator], but is of type [" + fieldType.typeName() + "]");
    }
    final List<ParsedDocument> docs = new ArrayList<>();
    final DocumentMapper docMapper;
    final MapperService mapperService = context.getMapperService();
    String type = mapperService.documentMapper().type();
    if (documentType != null) {
        deprecationLogger.deprecate("percolate_with_document_type", DOCUMENT_TYPE_DEPRECATION_MESSAGE);
        if (documentType.equals(type) == false) {
            throw new IllegalArgumentException(
                "specified document_type [" + documentType + "] is not equal to the actual type [" + type + "]");
        }
    }
    docMapper = mapperService.documentMapper(type);
    for (BytesReference document : documents) {
        docs.add(docMapper.parse(new SourceToParse(context.index().getName(), type, "_temp_id", document, documentXContentType)));
    }
    FieldNameAnalyzer fieldNameAnalyzer = (FieldNameAnalyzer) docMapper.mappers().indexAnalyzer();
    // We need this custom impl because FieldNameAnalyzer is strict, and the percolator sometimes isn't when
    // 'index.percolator.map_unmapped_fields_as_string' is enabled:
    Analyzer analyzer = new DelegatingAnalyzerWrapper(Analyzer.PER_FIELD_REUSE_STRATEGY) {
        @Override
        protected Analyzer getWrappedAnalyzer(String fieldName) {
            Analyzer analyzer = fieldNameAnalyzer.analyzers().get(fieldName);
            if (analyzer != null) {
                return analyzer;
            } else {
                return context.getIndexAnalyzers().getDefaultIndexAnalyzer();
            }
        }
    };
    final IndexSearcher docSearcher;
    final boolean excludeNestedDocuments;
    if (docs.size() > 1 || docs.get(0).docs().size() > 1) {
        assert docs.size() != 1 || docMapper.hasNestedObjects();
        docSearcher = createMultiDocumentSearcher(analyzer, docs);
        excludeNestedDocuments = docMapper.hasNestedObjects()
            && docs.stream().map(ParsedDocument::docs).mapToInt(List::size).anyMatch(size -> size > 1);
    } else {
        MemoryIndex memoryIndex = MemoryIndex.fromDocument(docs.get(0).rootDoc(), analyzer, true, false);
        docSearcher = memoryIndex.createSearcher();
        docSearcher.setQueryCache(null);
        excludeNestedDocuments = false;
    }
    PercolatorFieldMapper.PercolatorFieldType pft = (PercolatorFieldMapper.PercolatorFieldType) fieldType;
    String name = this.name != null ? this.name : pft.name();
    QueryShardContext percolateShardContext = wrap(context);
    PercolatorFieldMapper.configureContext(percolateShardContext, pft.mapUnmappedFieldsAsText);
    PercolateQuery.QueryStore queryStore = createStore(pft.queryBuilderField, percolateShardContext);
    return pft.percolateQuery(name, queryStore, documents, docSearcher, excludeNestedDocuments, context.indexVersionCreated());
}
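For context, the query that this method executes is typically built client-side as in the sketch below. The index and field names are illustrative assumptions; the three-argument PercolateQueryBuilder constructor is part of the percolator module:

// Sketch: percolate one in-memory document against queries stored in the
// "query" field of a hypothetical "queries" index.
BytesReference document = BytesReference.bytes(
    XContentFactory.jsonBuilder().startObject().field("message", "a quick brown fox").endObject());
QueryBuilder percolate = new PercolateQueryBuilder("query", document, XContentType.JSON);
SearchResponse response = client().prepareSearch("queries").setQuery(percolate).get();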
Use of org.opensearch.index.mapper.DocumentMapper in project OpenSearch by opensearch-project.
From class ParentJoinFieldMapperTests, method testParentIdSpecifiedAsNumber:
public void testParentIdSpecifiedAsNumber() throws Exception {
    String mapping = Strings.toString(XContentFactory.jsonBuilder()
        .startObject().startObject("properties").startObject("join_field").field("type", "join")
        .startObject("relations").field("parent", "child").endObject()
        .endObject().endObject().endObject());
    IndexService service = createIndex("test");
    DocumentMapper docMapper = service.mapperService()
        .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE);
    ParsedDocument doc = docMapper.parse(new SourceToParse("test", "type", "2",
        BytesReference.bytes(XContentFactory.jsonBuilder().startObject().startObject("join_field")
            .field("name", "child").field("parent", 1).endObject().endObject()),
        XContentType.JSON, "1"));
    assertEquals("1", doc.rootDoc().getBinaryValue("join_field#parent").utf8ToString());
    assertEquals("child", doc.rootDoc().getBinaryValue("join_field").utf8ToString());
    doc = docMapper.parse(new SourceToParse("test", "type", "2",
        BytesReference.bytes(XContentFactory.jsonBuilder().startObject().startObject("join_field")
            .field("name", "child").field("parent", 1.0).endObject().endObject()),
        XContentType.JSON, "1"));
    assertEquals("1.0", doc.rootDoc().getBinaryValue("join_field#parent").utf8ToString());
    assertEquals("child", doc.rootDoc().getBinaryValue("join_field").utf8ToString());
}
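For readability, the XContent builder chain above serializes to this JSON mapping, a single parent -> child relation on the join field:

{
  "properties": {
    "join_field": {
      "type": "join",
      "relations": {
        "parent": "child"
      }
    }
  }
}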
Use of org.opensearch.index.mapper.DocumentMapper in project OpenSearch by opensearch-project.
From class ParentJoinFieldMapperTests, method testMultipleLevels:
public void testMultipleLevels() throws Exception {
    String mapping = Strings.toString(XContentFactory.jsonBuilder()
        .startObject().startObject("properties").startObject("join_field").field("type", "join")
        .startObject("relations").field("parent", "child").field("child", "grand_child").endObject()
        .endObject().endObject().endObject());
    IndexService service = createIndex("test");
    DocumentMapper docMapper = service.mapperService()
        .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE);
    assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(service.mapperService()));
    // Doc without join
    ParsedDocument doc = docMapper.parse(new SourceToParse("test", "type", "0",
        BytesReference.bytes(XContentFactory.jsonBuilder().startObject().endObject()), XContentType.JSON));
    assertNull(doc.rootDoc().getBinaryValue("join_field"));
    // Doc parent
    doc = docMapper.parse(new SourceToParse("test", "type", "1",
        BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("join_field", "parent").endObject()),
        XContentType.JSON));
    assertEquals("1", doc.rootDoc().getBinaryValue("join_field#parent").utf8ToString());
    assertEquals("parent", doc.rootDoc().getBinaryValue("join_field").utf8ToString());
    // Doc child
    doc = docMapper.parse(new SourceToParse("test", "type", "2",
        BytesReference.bytes(XContentFactory.jsonBuilder().startObject().startObject("join_field")
            .field("name", "child").field("parent", "1").endObject().endObject()),
        XContentType.JSON, "1"));
    assertEquals("1", doc.rootDoc().getBinaryValue("join_field#parent").utf8ToString());
    assertEquals("2", doc.rootDoc().getBinaryValue("join_field#child").utf8ToString());
    assertEquals("child", doc.rootDoc().getBinaryValue("join_field").utf8ToString());
    // Doc child missing parent
    MapperException exc = expectThrows(MapperParsingException.class, () -> docMapper.parse(new SourceToParse("test", "type", "2",
        BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("join_field", "child").endObject()),
        XContentType.JSON, "1")));
    assertThat(exc.getRootCause().getMessage(), containsString("[parent] is missing for join field [join_field]"));
    // Doc child missing routing
    exc = expectThrows(MapperParsingException.class, () -> docMapper.parse(new SourceToParse("test", "type", "2",
        BytesReference.bytes(XContentFactory.jsonBuilder().startObject().startObject("join_field")
            .field("name", "child").field("parent", "1").endObject().endObject()),
        XContentType.JSON)));
    assertThat(exc.getRootCause().getMessage(), containsString("[routing] is missing for join field [join_field]"));
    // Doc grand_child
    doc = docMapper.parse(new SourceToParse("test", "type", "3",
        BytesReference.bytes(XContentFactory.jsonBuilder().startObject().startObject("join_field")
            .field("name", "grand_child").field("parent", "2").endObject().endObject()),
        XContentType.JSON, "1"));
    assertEquals("2", doc.rootDoc().getBinaryValue("join_field#child").utf8ToString());
    assertEquals("grand_child", doc.rootDoc().getBinaryValue("join_field").utf8ToString());
    // Unknown join name
    exc = expectThrows(MapperParsingException.class, () -> docMapper.parse(new SourceToParse("test", "type", "1",
        BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("join_field", "unknown").endObject()),
        XContentType.JSON)));
    assertThat(exc.getRootCause().getMessage(), containsString("unknown join name [unknown] for field [join_field]"));
}
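Once documents carrying this join field are indexed, the relations are usually exercised through the join module's query builders (ParentIdQueryBuilder, HasChildQueryBuilder). A minimal sketch, reusing the relation names from the test above; the surrounding index and client setup are assumed:

// Sketch: find children of parent "1", and parents that have at least one child.
QueryBuilder childrenOfParent1 = new ParentIdQueryBuilder("child", "1");
QueryBuilder parentsWithChild = new HasChildQueryBuilder("child", QueryBuilders.matchAllQuery(), ScoreMode.None);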