Example 31 with MapperService

use of org.elasticsearch.index.mapper.MapperService in project elasticsearch by elastic.

the class IndexShardTestCase method newShard.

/**
     * Creates a new initializing shard.
     *
     * @param routing                shard routing to use
     * @param shardPath              path to use for shard data
     * @param indexMetaData          indexMetaData for the shard, including any mapping
     * @param indexSearcherWrapper   an optional wrapper to be used during searches
     * @param globalCheckpointSyncer callback used by the shard to sync the global checkpoint
     * @param engineFactory          an optional factory for the engine; null uses the default engine
     * @param listeners              an optional set of listeners to add to the shard
     */
protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMetaData indexMetaData, @Nullable IndexSearcherWrapper indexSearcherWrapper, Runnable globalCheckpointSyncer, @Nullable EngineFactory engineFactory, IndexingOperationListener... listeners) throws IOException {
    final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build();
    final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings);
    final IndexShard indexShard;
    final Store store = createStore(indexSettings, shardPath);
    boolean success = false;
    try {
        IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null);
        MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), indexSettings.getSettings());
        mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, true);
        SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
        final IndexEventListener indexEventListener = new IndexEventListener() {
        };
        final Engine.Warmer warmer = searcher -> {
        };
        IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(nodeSettings, new IndexFieldDataCache.Listener() {
        });
        IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, indicesFieldDataCache, new NoneCircuitBreakerService(), mapperService);
        indexShard = new IndexShard(routing, indexSettings, shardPath, store, indexCache, mapperService, similarityService, indexFieldDataService, engineFactory, indexEventListener, indexSearcherWrapper, threadPool, BigArrays.NON_RECYCLING_INSTANCE, warmer, globalCheckpointSyncer, Collections.emptyList(), Arrays.asList(listeners));
        success = true;
    } finally {
        if (success == false) {
            IOUtils.close(store);
        }
    }
    return indexShard;
}
Also used : IndexNotFoundException(org.apache.lucene.index.IndexNotFoundException) Versions(org.elasticsearch.common.lucene.uid.Versions) ByteSizeUnit(org.elasticsearch.common.unit.ByteSizeUnit) Arrays(java.util.Arrays) Nullable(org.elasticsearch.common.Nullable) BigArrays(org.elasticsearch.common.util.BigArrays) BiFunction(java.util.function.BiFunction) VersionType(org.elasticsearch.index.VersionType) Document(org.apache.lucene.document.Document) IndexRequest(org.elasticsearch.action.index.IndexRequest) Settings(org.elasticsearch.common.settings.Settings) ShardRoutingHelper(org.elasticsearch.cluster.routing.ShardRoutingHelper) Directory(org.apache.lucene.store.Directory) ThreadPool(org.elasticsearch.threadpool.ThreadPool) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) UidFieldMapper(org.elasticsearch.index.mapper.UidFieldMapper) EnumSet(java.util.EnumSet) PeerRecoveryTargetService(org.elasticsearch.indices.recovery.PeerRecoveryTargetService) Set(java.util.Set) MapperTestUtils(org.elasticsearch.index.MapperTestUtils) Engine(org.elasticsearch.index.engine.Engine) SimilarityService(org.elasticsearch.index.similarity.SimilarityService) RecoverySource(org.elasticsearch.cluster.routing.RecoverySource) MapperService(org.elasticsearch.index.mapper.MapperService) Version(org.elasticsearch.Version) Matchers.contains(org.hamcrest.Matchers.contains) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) RecoveryState(org.elasticsearch.indices.recovery.RecoveryState) LeafReader(org.apache.lucene.index.LeafReader) TestShardRouting(org.elasticsearch.cluster.routing.TestShardRouting) ShardRouting(org.elasticsearch.cluster.routing.ShardRouting) StartRecoveryRequest(org.elasticsearch.indices.recovery.StartRecoveryRequest) XContentType(org.elasticsearch.common.xcontent.XContentType) IndexFieldDataService(org.elasticsearch.index.fielddata.IndexFieldDataService) RecoveryFailedException(org.elasticsearch.indices.recovery.RecoveryFailedException) ShardRoutingState(org.elasticsearch.cluster.routing.ShardRoutingState) DisabledQueryCache(org.elasticsearch.index.cache.query.DisabledQueryCache) BytesArray(org.elasticsearch.common.bytes.BytesArray) RecoverySourceHandler(org.elasticsearch.indices.recovery.RecoverySourceHandler) HashSet(java.util.HashSet) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) IndexCache(org.elasticsearch.index.cache.IndexCache) SequenceNumbersService(org.elasticsearch.index.seqno.SequenceNumbersService) NoneCircuitBreakerService(org.elasticsearch.indices.breaker.NoneCircuitBreakerService) Store(org.elasticsearch.index.store.Store) IndexSettings(org.elasticsearch.index.IndexSettings) Node(org.elasticsearch.node.Node) Matchers.hasSize(org.hamcrest.Matchers.hasSize) ESTestCase(org.elasticsearch.test.ESTestCase) Bits(org.apache.lucene.util.Bits) SourceToParse(org.elasticsearch.index.mapper.SourceToParse) TestThreadPool(org.elasticsearch.threadpool.TestThreadPool) IndexFieldDataCache(org.elasticsearch.index.fielddata.IndexFieldDataCache) Uid(org.elasticsearch.index.mapper.Uid) RecoveryTarget(org.elasticsearch.indices.recovery.RecoveryTarget) IOUtils(org.apache.lucene.util.IOUtils) IOException(java.io.IOException) DirectoryService(org.elasticsearch.index.store.DirectoryService) EngineFactory(org.elasticsearch.index.engine.EngineFactory) TimeUnit(java.util.concurrent.TimeUnit) FlushRequest(org.elasticsearch.action.admin.indices.flush.FlushRequest) NodeEnvironment(org.elasticsearch.env.NodeEnvironment) 
IndicesFieldDataCache(org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache) DummyShardLock(org.elasticsearch.test.DummyShardLock) Collections(java.util.Collections) IndexSettings(org.elasticsearch.index.IndexSettings) Store(org.elasticsearch.index.store.Store) IndexFieldDataCache(org.elasticsearch.index.fielddata.IndexFieldDataCache) IndicesFieldDataCache(org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache) IndexFieldDataService(org.elasticsearch.index.fielddata.IndexFieldDataService) IndexCache(org.elasticsearch.index.cache.IndexCache) SimilarityService(org.elasticsearch.index.similarity.SimilarityService) Settings(org.elasticsearch.common.settings.Settings) IndexSettings(org.elasticsearch.index.IndexSettings) DisabledQueryCache(org.elasticsearch.index.cache.query.DisabledQueryCache) MapperService(org.elasticsearch.index.mapper.MapperService) Engine(org.elasticsearch.index.engine.Engine) NoneCircuitBreakerService(org.elasticsearch.indices.breaker.NoneCircuitBreakerService)
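
The snippet above is the low-level builder that tests use to create an IndexShard. As a rough sketch of how a test might assemble its arguments, the following is illustrative only: the ShardPath constructor arguments, the no-op globalCheckpointSyncer, and the null engine factory are assumptions that may not match every version of the test framework.

// Hypothetical usage sketch (not part of the page above); assumes IndexShardTestCase helpers
// such as createTempDir() and the 4-argument ShardPath constructor are available.
ShardRouting routing = TestShardRouting.newShardRouting("test", 0, "node_0", true, ShardRoutingState.INITIALIZING);
IndexMetaData indexMetaData = IndexMetaData.builder("test")
        .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
        .numberOfShards(1)
        .numberOfReplicas(0)
        .build();
// Both the data path and the state path point at fresh temp directories for this sketch.
ShardPath shardPath = new ShardPath(false, createTempDir(), createTempDir(), routing.shardId());
// No searcher wrapper, a no-op global checkpoint syncer, the default engine, and no extra listeners.
IndexShard shard = newShard(routing, shardPath, indexMetaData, null, () -> {}, null);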

Example 32 with MapperService

use of org.elasticsearch.index.mapper.MapperService in project elasticsearch by elastic.

the class JavaMultiFieldMergeTests method testMergeMultiField.

public void testMergeMultiField() throws Exception {
    String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json");
    MapperService mapperService = createIndex("test").mapperService();
    DocumentMapper docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
    assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
    assertThat(docMapper.mappers().getMapper("name.indexed"), nullValue());
    BytesReference json = XContentFactory.jsonBuilder().startObject().field("name", "some name").endObject().bytes();
    Document doc = docMapper.parse("test", "person", "1", json).rootDoc();
    IndexableField f = doc.getField("name");
    assertThat(f, notNullValue());
    f = doc.getField("name.indexed");
    assertThat(f, nullValue());
    mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json");
    docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
    assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
    assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
    assertThat(docMapper.mappers().getMapper("name.indexed"), notNullValue());
    assertThat(docMapper.mappers().getMapper("name.not_indexed"), notNullValue());
    assertThat(docMapper.mappers().getMapper("name.not_indexed2"), nullValue());
    assertThat(docMapper.mappers().getMapper("name.not_indexed3"), nullValue());
    doc = docMapper.parse("test", "person", "1", json).rootDoc();
    f = doc.getField("name");
    assertThat(f, notNullValue());
    f = doc.getField("name.indexed");
    assertThat(f, notNullValue());
    mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json");
    docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
    assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
    assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
    assertThat(docMapper.mappers().getMapper("name.indexed"), notNullValue());
    assertThat(docMapper.mappers().getMapper("name.not_indexed"), notNullValue());
    assertThat(docMapper.mappers().getMapper("name.not_indexed2"), notNullValue());
    assertThat(docMapper.mappers().getMapper("name.not_indexed3"), nullValue());
    mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json");
    docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
    assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
    assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
    assertThat(docMapper.mappers().getMapper("name.indexed"), notNullValue());
    assertThat(docMapper.mappers().getMapper("name.not_indexed"), notNullValue());
    assertThat(docMapper.mappers().getMapper("name.not_indexed2"), notNullValue());
    assertThat(docMapper.mappers().getMapper("name.not_indexed3"), notNullValue());
}
Also used : BytesReference(org.elasticsearch.common.bytes.BytesReference) IndexableField(org.apache.lucene.index.IndexableField) DocumentMapper(org.elasticsearch.index.mapper.DocumentMapper) CompressedXContent(org.elasticsearch.common.compress.CompressedXContent) Matchers.containsString(org.hamcrest.Matchers.containsString) Document(org.elasticsearch.index.mapper.ParseContext.Document) MapperService(org.elasticsearch.index.mapper.MapperService)
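
The mapping files loaded from the classpath in this test are not reproduced on this page. As a hypothetical equivalent of the first merge step, the same MapperService.merge(...) call can be made with an inline multi-field mapping built through XContentFactory; the field layout below is illustrative, not the actual contents of test-mapping1.json.

// Illustrative only: an inline "person" mapping where "name" is a text field with one sub-field.
String inlineMapping = XContentFactory.jsonBuilder().startObject().startObject("person")
        .startObject("properties")
            .startObject("name")
                .field("type", "text")
                .startObject("fields")
                    .startObject("indexed").field("type", "text").endObject()
                .endObject()
            .endObject()
        .endObject()
        .endObject().endObject().string();
DocumentMapper merged = mapperService.merge("person", new CompressedXContent(inlineMapping),
        MapperService.MergeReason.MAPPING_UPDATE, false);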

Example 33 with MapperService

use of org.elasticsearch.index.mapper.MapperService in project elasticsearch by elastic.

the class RareClusterStateIT method testDelayedMappingPropagationOnReplica.

public void testDelayedMappingPropagationOnReplica() throws Exception {
    // This is essentially the same thing as testDelayedMappingPropagationOnPrimary
    // but for replicas
    // Here we want to test that everything goes well if the mappings that
    // are needed for a document are not available on the replica at the
    // time of indexing it
    final List<String> nodeNames = internalCluster().startNodes(2, Settings.builder()
        // explicitly set so it won't default to publish timeout
        .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s")
        // don't wait post commit as we are blocking things by design
        .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s")
        .build());
    assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
    final String master = internalCluster().getMasterName();
    assertThat(nodeNames, hasItem(master));
    String otherNode = null;
    for (String node : nodeNames) {
        if (node.equals(master) == false) {
            otherNode = node;
            break;
        }
    }
    assertNotNull(otherNode);
    // Force allocation of the primary on the master node by first only allocating on the master
    // and then allowing all nodes so that the replica gets allocated on the other node
    assertAcked(prepareCreate("index").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).put("index.routing.allocation.include._name", master)).get());
    assertAcked(client().admin().indices().prepareUpdateSettings("index").setSettings(Settings.builder().put("index.routing.allocation.include._name", "")).get());
    ensureGreen();
    // Check routing tables
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    assertEquals(master, state.nodes().getMasterNode().getName());
    List<ShardRouting> shards = state.routingTable().allShards("index");
    assertThat(shards, hasSize(2));
    for (ShardRouting shard : shards) {
        if (shard.primary()) {
            // primary must be on the master
            assertEquals(state.nodes().getMasterNodeId(), shard.currentNodeId());
        } else {
            assertTrue(shard.active());
        }
    }
    // Block cluster state processing on the replica
    BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(otherNode, random());
    internalCluster().setDisruptionScheme(disruption);
    disruption.startDisrupting();
    final AtomicReference<Object> putMappingResponse = new AtomicReference<>();
    client().admin().indices().preparePutMapping("index").setType("type").setSource("field", "type=long").execute(new ActionListener<PutMappingResponse>() {

        @Override
        public void onResponse(PutMappingResponse response) {
            putMappingResponse.set(response);
        }

        @Override
        public void onFailure(Exception e) {
            putMappingResponse.set(e);
        }
    });
    final Index index = resolveIndex("index");
    // Wait for mappings to be available on master
    assertBusy(new Runnable() {

        @Override
        public void run() {
            final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master);
            final IndexService indexService = indicesService.indexServiceSafe(index);
            assertNotNull(indexService);
            final MapperService mapperService = indexService.mapperService();
            DocumentMapper mapper = mapperService.documentMapper("type");
            assertNotNull(mapper);
            assertNotNull(mapper.mappers().getMapper("field"));
        }
    });
    final AtomicReference<Object> docIndexResponse = new AtomicReference<>();
    client().prepareIndex("index", "type", "1").setSource("field", 42).execute(new ActionListener<IndexResponse>() {

        @Override
        public void onResponse(IndexResponse response) {
            docIndexResponse.set(response);
        }

        @Override
        public void onFailure(Exception e) {
            docIndexResponse.set(e);
        }
    });
    // Wait for document to be indexed on primary
    assertBusy(new Runnable() {

        @Override
        public void run() {
            assertTrue(client().prepareGet("index", "type", "1").setPreference("_primary").get().isExists());
        }
    });
    // The mappings have not been propagated to the replica yet; as a consequence the document cannot be indexed
    // We wait on purpose to make sure that the document is not indexed because the shard operation is stalled
    // and not just because it takes time to replicate the indexing request to the replica
    Thread.sleep(100);
    assertThat(putMappingResponse.get(), equalTo(null));
    assertThat(docIndexResponse.get(), equalTo(null));
    // Now make sure the indexing request finishes successfully
    disruption.stopDisrupting();
    assertBusy(new Runnable() {

        @Override
        public void run() {
            assertThat(putMappingResponse.get(), instanceOf(PutMappingResponse.class));
            PutMappingResponse resp = (PutMappingResponse) putMappingResponse.get();
            assertTrue(resp.isAcknowledged());
            assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class));
            IndexResponse docResp = (IndexResponse) docIndexResponse.get();
            // both shards should have succeeded
            assertEquals(Arrays.toString(docResp.getShardInfo().getFailures()), 2, docResp.getShardInfo().getTotal());
        }
    });
}
Also used : ClusterState(org.elasticsearch.cluster.ClusterState) BlockClusterStateProcessing(org.elasticsearch.test.disruption.BlockClusterStateProcessing) IndexService(org.elasticsearch.index.IndexService) DocumentMapper(org.elasticsearch.index.mapper.DocumentMapper) IndicesService(org.elasticsearch.indices.IndicesService) AtomicReference(java.util.concurrent.atomic.AtomicReference) Index(org.elasticsearch.index.Index) IOException(java.io.IOException) IndexResponse(org.elasticsearch.action.index.IndexResponse) PutMappingResponse(org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse) ShardRouting(org.elasticsearch.cluster.routing.ShardRouting) MapperService(org.elasticsearch.index.mapper.MapperService)
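
Since the waits in this test are plain no-argument blocks, each anonymous Runnable can equivalently be written as a lambda; a behaviorally identical sketch of the first wait is shown below (assuming the assertBusy overload in this test framework accepts a no-argument functional interface).

// Lambda form of the "wait for mappings to be available on master" block above.
assertBusy(() -> {
    final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master);
    final MapperService mapperService = indicesService.indexServiceSafe(index).mapperService();
    DocumentMapper mapper = mapperService.documentMapper("type");
    assertNotNull(mapper);
    assertNotNull(mapper.mappers().getMapper("field"));
});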

Example 34 with MapperService

use of org.elasticsearch.index.mapper.MapperService in project elasticsearch by elastic.

the class IndexService method createShard.

public synchronized IndexShard createShard(ShardRouting routing) throws IOException {
    final boolean primary = routing.primary();
    /*
         * TODO: we execute this in parallel but it's a synced method. Yet, we might
         * be able to serialize the execution via the cluster state in the future. for now we just
         * keep it synced.
         */
    if (closed.get()) {
        throw new IllegalStateException("Can't create shard " + routing.shardId() + ", closed");
    }
    final Settings indexSettings = this.indexSettings.getSettings();
    final ShardId shardId = routing.shardId();
    boolean success = false;
    Store store = null;
    IndexShard indexShard = null;
    ShardLock lock = null;
    try {
        lock = nodeEnv.shardLock(shardId, TimeUnit.SECONDS.toMillis(5));
        eventListener.beforeIndexShardCreated(shardId, indexSettings);
        ShardPath path;
        try {
            path = ShardPath.loadShardPath(logger, nodeEnv, shardId, this.indexSettings);
        } catch (IllegalStateException ex) {
            logger.warn("{} failed to load shard path, trying to remove leftover", shardId);
            try {
                ShardPath.deleteLeftoverShardDirectory(logger, nodeEnv, lock, this.indexSettings);
                path = ShardPath.loadShardPath(logger, nodeEnv, shardId, this.indexSettings);
            } catch (Exception inner) {
                ex.addSuppressed(inner);
                throw ex;
            }
        }
        if (path == null) {
            // TODO: we should, instead, hold a "bytes reserved" of how large we anticipate this shard will be, e.g. for a shard
            // that's being relocated/replicated we know how large it will become once it's done copying:
            // Count up how many shards are currently on each data path:
            Map<Path, Integer> dataPathToShardCount = new HashMap<>();
            for (IndexShard shard : this) {
                Path dataPath = shard.shardPath().getRootStatePath();
                Integer curCount = dataPathToShardCount.get(dataPath);
                if (curCount == null) {
                    curCount = 0;
                }
                dataPathToShardCount.put(dataPath, curCount + 1);
            }
            path = ShardPath.selectNewPathForShard(nodeEnv, shardId, this.indexSettings, routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE ? getAvgShardSizeInBytes() : routing.getExpectedShardSize(), dataPathToShardCount);
            logger.debug("{} creating using a new path [{}]", shardId, path);
        } else {
            logger.debug("{} creating using an existing path [{}]", shardId, path);
        }
        if (shards.containsKey(shardId.id())) {
            throw new IllegalStateException(shardId + " already exists");
        }
        logger.debug("creating shard_id {}", shardId);
        // if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary.
        final boolean canDeleteShardContent = this.indexSettings.isOnSharedFilesystem() == false || (primary && this.indexSettings.isOnSharedFilesystem());
        final Engine.Warmer engineWarmer = (searcher) -> {
            IndexShard shard = getShardOrNull(shardId.getId());
            if (shard != null) {
                warmer.warm(searcher, shard, IndexService.this.indexSettings);
            }
        };
        store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> eventListener.onStoreClosed(shardId)));
        if (useShadowEngine(primary, this.indexSettings)) {
            indexShard = new ShadowIndexShard(routing, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, engineWarmer, searchOperationListeners);
        // no indexing listeners - shadow  engines don't index
        } else {
            indexShard = new IndexShard(routing, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, engineWarmer, () -> globalCheckpointSyncer.accept(shardId), searchOperationListeners, indexingOperationListeners);
        }
        eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created");
        eventListener.afterIndexShardCreated(indexShard);
        shards = newMapBuilder(shards).put(shardId.id(), indexShard).immutableMap();
        success = true;
        return indexShard;
    } catch (ShardLockObtainFailedException e) {
        throw new IOException("failed to obtain in-memory shard lock", e);
    } finally {
        if (success == false) {
            if (lock != null) {
                IOUtils.closeWhileHandlingException(lock);
            }
            closeShard("initialization failed", shardId, indexShard, store, eventListener);
        }
    }
}
Also used : Path(java.nio.file.Path) ShardPath(org.elasticsearch.index.shard.ShardPath) BitsetFilterCache(org.elasticsearch.index.cache.bitset.BitsetFilterCache) ShardId(org.elasticsearch.index.shard.ShardId) ScheduledFuture(java.util.concurrent.ScheduledFuture) QueryShardContext(org.elasticsearch.index.query.QueryShardContext) LongSupplier(java.util.function.LongSupplier) Nullable(org.elasticsearch.common.Nullable) BigArrays(org.elasticsearch.common.util.BigArrays) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) IndexAnalyzers(org.elasticsearch.index.analysis.IndexAnalyzers) MapperRegistry(org.elasticsearch.indices.mapper.MapperRegistry) ShardNotFoundException(org.elasticsearch.index.shard.ShardNotFoundException) Settings(org.elasticsearch.common.settings.Settings) SearchOperationListener(org.elasticsearch.index.shard.SearchOperationListener) Map(java.util.Map) ThreadPool(org.elasticsearch.threadpool.ThreadPool) Path(java.nio.file.Path) NamedXContentRegistry(org.elasticsearch.common.xcontent.NamedXContentRegistry) IndexStore(org.elasticsearch.index.store.IndexStore) Set(java.util.Set) ShardLockObtainFailedException(org.elasticsearch.env.ShardLockObtainFailedException) AnalysisRegistry(org.elasticsearch.index.analysis.AnalysisRegistry) MapBuilder.newMapBuilder(org.elasticsearch.common.collect.MapBuilder.newMapBuilder) Engine(org.elasticsearch.index.engine.Engine) SimilarityService(org.elasticsearch.index.similarity.SimilarityService) Objects(java.util.Objects) MapperService(org.elasticsearch.index.mapper.MapperService) List(java.util.List) Supplier(org.apache.logging.log4j.util.Supplier) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) IndicesClusterStateService(org.elasticsearch.indices.cluster.IndicesClusterStateService) Accountable(org.apache.lucene.util.Accountable) ShardPath(org.elasticsearch.index.shard.ShardPath) IndexingOperationListener(org.elasticsearch.index.shard.IndexingOperationListener) ShadowIndexShard(org.elasticsearch.index.shard.ShadowIndexShard) IndexReader(org.apache.lucene.index.IndexReader) ShardRouting(org.elasticsearch.cluster.routing.ShardRouting) IndexShardClosedException(org.elasticsearch.index.shard.IndexShardClosedException) IndexFieldDataService(org.elasticsearch.index.fielddata.IndexFieldDataService) ClusterService(org.elasticsearch.cluster.service.ClusterService) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) CircuitBreakerService(org.elasticsearch.indices.breaker.CircuitBreakerService) IndexCache(org.elasticsearch.index.cache.IndexCache) IndexSearcherWrapper(org.elasticsearch.index.shard.IndexSearcherWrapper) TimeValue(org.elasticsearch.common.unit.TimeValue) Store(org.elasticsearch.index.store.Store) Collections.emptyMap(java.util.Collections.emptyMap) FutureUtils(org.elasticsearch.common.util.concurrent.FutureUtils) IndexFieldDataCache(org.elasticsearch.index.fielddata.IndexFieldDataCache) IndexEventListener(org.elasticsearch.index.shard.IndexEventListener) Iterator(java.util.Iterator) Client(org.elasticsearch.client.Client) IndexShard(org.elasticsearch.index.shard.IndexShard) IOUtils(org.apache.lucene.util.IOUtils) IOException(java.io.IOException) ShardLock(org.elasticsearch.env.ShardLock) EngineFactory(org.elasticsearch.index.engine.EngineFactory) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) NodeEnvironment(org.elasticsearch.env.NodeEnvironment) 
IndicesFieldDataCache(org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache) QueryCache(org.elasticsearch.index.cache.query.QueryCache) Closeable(java.io.Closeable) Translog(org.elasticsearch.index.translog.Translog) Collections.unmodifiableMap(java.util.Collections.unmodifiableMap) ScriptService(org.elasticsearch.script.ScriptService) Collections(java.util.Collections) HashMap(java.util.HashMap) ShadowIndexShard(org.elasticsearch.index.shard.ShadowIndexShard) IndexShard(org.elasticsearch.index.shard.IndexShard) IndexStore(org.elasticsearch.index.store.IndexStore) Store(org.elasticsearch.index.store.Store) ShadowIndexShard(org.elasticsearch.index.shard.ShadowIndexShard) IOException(java.io.IOException) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) ShardNotFoundException(org.elasticsearch.index.shard.ShardNotFoundException) ShardLockObtainFailedException(org.elasticsearch.env.ShardLockObtainFailedException) IndexShardClosedException(org.elasticsearch.index.shard.IndexShardClosedException) IOException(java.io.IOException) ShardId(org.elasticsearch.index.shard.ShardId) ShardPath(org.elasticsearch.index.shard.ShardPath) ShardLock(org.elasticsearch.env.ShardLock) ShardLockObtainFailedException(org.elasticsearch.env.ShardLockObtainFailedException) Settings(org.elasticsearch.common.settings.Settings) Engine(org.elasticsearch.index.engine.Engine)
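
A small aside on the shard-counting loop in createShard: since the map values are simple counters, the null check can be folded into Map.merge. The sketch below is only an illustration of that idiom, not a change taken from the Elasticsearch source.

// Equivalent counting with Map.merge (Java 8): start at 1, otherwise add 1 to the existing count.
Map<Path, Integer> dataPathToShardCount = new HashMap<>();
for (IndexShard shard : this) {
    dataPathToShardCount.merge(shard.shardPath().getRootStatePath(), 1, Integer::sum);
}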

Example 35 with MapperService

use of org.elasticsearch.index.mapper.MapperService in project elasticsearch by elastic.

the class DocumentMapperMergeTests method testChangeSearchAnalyzerToDefault.

public void testChangeSearchAnalyzerToDefault() throws Exception {
    MapperService mapperService = createIndex("test").mapperService();
    String mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties").startObject("field").field("type", "text").field("analyzer", "standard").field("search_analyzer", "whitespace").endObject().endObject().endObject().endObject().string();
    String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties").startObject("field").field("type", "text").field("analyzer", "standard").endObject().endObject().endObject().endObject().string();
    DocumentMapper existing = mapperService.merge("type", new CompressedXContent(mapping1), MapperService.MergeReason.MAPPING_UPDATE, false);
    DocumentMapper merged = mapperService.merge("type", new CompressedXContent(mapping2), MapperService.MergeReason.MAPPING_UPDATE, false);
    assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("whitespace"));
    assertThat(((NamedAnalyzer) merged.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("standard"));
}
Also used : DocumentMapper(org.elasticsearch.index.mapper.DocumentMapper) CompressedXContent(org.elasticsearch.common.compress.CompressedXContent) Matchers.containsString(org.hamcrest.Matchers.containsString) MapperService(org.elasticsearch.index.mapper.MapperService)
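
Because the second merge uses MergeReason.MAPPING_UPDATE, the merged mapper should also be what the MapperService serves for the type afterwards; a short sketch of asserting that through mapperService.documentMapper (an assumption about this codepath, shown for illustration) follows.

// Illustrative follow-up check: the registered mapper now reports the default (index) analyzer for search.
DocumentMapper current = mapperService.documentMapper("type");
assertThat(((NamedAnalyzer) current.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("standard"));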

Aggregations

MapperService (org.elasticsearch.index.mapper.MapperService): 46
Settings (org.elasticsearch.common.settings.Settings): 16
DocumentMapper (org.elasticsearch.index.mapper.DocumentMapper): 14
IndexSettings (org.elasticsearch.index.IndexSettings): 13
CompressedXContent (org.elasticsearch.common.compress.CompressedXContent): 12
IOException (java.io.IOException): 10
Store (org.elasticsearch.index.store.Store): 9
Matchers.containsString (org.hamcrest.Matchers.containsString): 9
IndexMetadata (org.elasticsearch.cluster.metadata.IndexMetadata): 8
Index (org.elasticsearch.index.Index): 8
ParsedDocument (org.elasticsearch.index.mapper.ParsedDocument): 8
HashMap (java.util.HashMap): 7
Map (java.util.Map): 7
IndexService (org.elasticsearch.index.IndexService): 7
AtomicLong (java.util.concurrent.atomic.AtomicLong): 6
IndexAnalyzers (org.elasticsearch.index.analysis.IndexAnalyzers): 6
Collections (java.util.Collections): 5
HashSet (java.util.HashSet): 5
List (java.util.List): 5
Analyzer (org.apache.lucene.analysis.Analyzer): 5