Example 16 with MapperService

Use of org.elasticsearch.index.mapper.MapperService in project crate by crate.

From class MapperTestUtils, the method newMapperService:

public static MapperService newMapperService(NamedXContentRegistry xContentRegistry, Path tempDir, Settings settings, IndicesModule indicesModule, String indexName) throws IOException {
    Settings.Builder settingsBuilder = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), tempDir).put(settings);
    if (settings.get(IndexMetadata.SETTING_VERSION_CREATED) == null) {
        settingsBuilder.put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT);
    }
    Settings finalSettings = settingsBuilder.build();
    MapperRegistry mapperRegistry = indicesModule.getMapperRegistry();
    IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexName, finalSettings);
    IndexAnalyzers indexAnalyzers = createTestAnalysis(indexSettings, finalSettings).indexAnalyzers;
    return new MapperService(indexSettings, indexAnalyzers, xContentRegistry, mapperRegistry, () -> null);
}
Also used : MapperRegistry(org.elasticsearch.indices.mapper.MapperRegistry) IndexAnalyzers(org.elasticsearch.index.analysis.IndexAnalyzers) Settings(org.elasticsearch.common.settings.Settings) MapperService(org.elasticsearch.index.mapper.MapperService)
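For context, a test might call this helper as follows. This is a minimal sketch: the empty NamedXContentRegistry, the IndicesModule with no extra MapperPlugins, and the index name are illustrative choices, not taken from the project.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collections;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.IndicesModule;

// Hypothetical call site for the helper above; argument choices are placeholders.
static MapperService buildTestMapperService() throws IOException {
    Path tempDir = Files.createTempDirectory("mapper-test");
    return MapperTestUtils.newMapperService(
        NamedXContentRegistry.EMPTY,                // no custom XContent parsers registered
        tempDir,                                    // becomes path.home for the test
        Settings.EMPTY,                             // helper fills in SETTING_VERSION_CREATED
        new IndicesModule(Collections.emptyList()), // no additional MapperPlugins
        "test_index");                              // arbitrary index name
}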

Example 17 with MapperService

Use of org.elasticsearch.index.mapper.MapperService in project crate by crate.

From class BlobStoreRepository, the method snapshotShard:

@Override
public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, boolean writeShardGens, ActionListener<String> listener) {
    final ShardId shardId = store.shardId();
    final long startTime = threadPool.absoluteTimeInMillis();
    try {
        final String generation = snapshotStatus.generation();
        LOGGER.debug("[{}] [{}] snapshot to [{}] [{}] ...", shardId, snapshotId, metadata.name(), generation);
        final BlobContainer shardContainer = shardContainer(indexId, shardId);
        final Set<String> blobs;
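        // With shard generations the exact index-N blob is known up front; without
        // them the container must be listed to discover the latest index-N blob.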
        if (generation == null) {
            try {
                blobs = shardContainer.listBlobsByPrefix(INDEX_FILE_PREFIX).keySet();
            } catch (IOException e) {
                throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e);
            }
        } else {
            blobs = Collections.singleton(INDEX_FILE_PREFIX + generation);
        }
        Tuple<BlobStoreIndexShardSnapshots, String> tuple = buildBlobStoreIndexShardSnapshots(blobs, shardContainer, generation);
        BlobStoreIndexShardSnapshots snapshots = tuple.v1();
        final String fileListGeneration = tuple.v2();
        if (snapshots.snapshots().stream().anyMatch(sf -> sf.snapshot().equals(snapshotId.getName()))) {
            throw new IndexShardSnapshotFailedException(shardId, "Duplicate snapshot name [" + snapshotId.getName() + "] detected, aborting");
        }
        final List<BlobStoreIndexShardSnapshot.FileInfo> indexCommitPointFiles = new ArrayList<>();
        final BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> filesToSnapshot = new LinkedBlockingQueue<>();
        store.incRef();
        final Collection<String> fileNames;
        final Store.MetadataSnapshot metadataFromStore;
        try {
            // TODO apparently we don't use MetadataSnapshot#recoveryDiff(...) here but we should
            try {
                LOGGER.trace("[{}] [{}] Loading store metadata using index commit [{}]", shardId, snapshotId, snapshotIndexCommit);
                metadataFromStore = store.getMetadata(snapshotIndexCommit);
                fileNames = snapshotIndexCommit.getFileNames();
            } catch (IOException e) {
                throw new IndexShardSnapshotFailedException(shardId, "Failed to get store file metadata", e);
            }
        } finally {
            store.decRef();
        }
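        // Despite its name, indexTotalFileCount accumulates md.length() below,
        // i.e. it tracks the total size in bytes of the commit's files.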
        int indexIncrementalFileCount = 0;
        int indexTotalNumberOfFiles = 0;
        long indexIncrementalSize = 0;
        long indexTotalFileCount = 0;
        for (String fileName : fileNames) {
            if (snapshotStatus.isAborted()) {
                LOGGER.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName);
                throw new IndexShardSnapshotFailedException(shardId, "Aborted");
            }
            LOGGER.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName);
            final StoreFileMetadata md = metadataFromStore.get(fileName);
            BlobStoreIndexShardSnapshot.FileInfo existingFileInfo = null;
            List<BlobStoreIndexShardSnapshot.FileInfo> filesInfo = snapshots.findPhysicalIndexFiles(fileName);
            if (filesInfo != null) {
                for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) {
                    if (fileInfo.isSame(md)) {
                        // a commit point file with the same name, size and checksum was already copied to repository
                        // we will reuse it for this snapshot
                        existingFileInfo = fileInfo;
                        break;
                    }
                }
            }
            indexTotalFileCount += md.length();
            indexTotalNumberOfFiles++;
            if (existingFileInfo == null) {
                indexIncrementalFileCount++;
                indexIncrementalSize += md.length();
                // create a new FileInfo
                BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = new BlobStoreIndexShardSnapshot.FileInfo(DATA_BLOB_PREFIX + UUIDs.randomBase64UUID(), md, chunkSize());
                indexCommitPointFiles.add(snapshotFileInfo);
                filesToSnapshot.add(snapshotFileInfo);
            } else {
                indexCommitPointFiles.add(existingFileInfo);
            }
        }
        snapshotStatus.moveToStarted(startTime, indexIncrementalFileCount, indexTotalNumberOfFiles, indexIncrementalSize, indexTotalFileCount);
        assert indexIncrementalFileCount == filesToSnapshot.size();
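        // Completes once every queued file has been uploaded by the workers below
        // (or immediately, if this snapshot adds no new files); on completion it
        // writes the shard-level snapshot metadata and finishes the listener.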
        final StepListener<Collection<Void>> allFilesUploadedListener = new StepListener<>();
        allFilesUploadedListener.whenComplete(v -> {
            final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration());
            // now create and write the commit point
            final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(), lastSnapshotStatus.getIndexVersion(), indexCommitPointFiles, lastSnapshotStatus.getStartTime(), threadPool.absoluteTimeInMillis() - lastSnapshotStatus.getStartTime(), lastSnapshotStatus.getIncrementalFileCount(), lastSnapshotStatus.getIncrementalSize());
            LOGGER.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId);
            try {
                indexShardSnapshotFormat.write(snapshot, shardContainer, snapshotId.getUUID(), false);
            } catch (IOException e) {
                throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e);
            }
            // build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones
            List<SnapshotFiles> newSnapshotsList = new ArrayList<>();
            newSnapshotsList.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles()));
            for (SnapshotFiles point : snapshots) {
                newSnapshotsList.add(point);
            }
            final List<String> blobsToDelete;
            final String indexGeneration;
            if (writeShardGens) {
                indexGeneration = UUIDs.randomBase64UUID();
                blobsToDelete = Collections.emptyList();
            } else {
                indexGeneration = Long.toString(Long.parseLong(fileListGeneration) + 1);
                // Delete all previous index-N blobs
                blobsToDelete = blobs.stream().filter(blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX)).collect(Collectors.toList());
                assert blobsToDelete.stream().mapToLong(b -> Long.parseLong(b.replaceFirst(SNAPSHOT_INDEX_PREFIX, ""))).max().orElse(-1L) < Long.parseLong(indexGeneration) : "Tried to delete an index-N blob newer than the current generation [" + indexGeneration + "] when deleting index-N blobs " + blobsToDelete;
            }
            try {
                writeShardIndexBlob(shardContainer, indexGeneration, new BlobStoreIndexShardSnapshots(newSnapshotsList));
            } catch (IOException e) {
                throw new IndexShardSnapshotFailedException(shardId, "Failed to finalize snapshot creation [" + snapshotId + "] with shard index [" + indexShardSnapshotsFormat.blobName(indexGeneration) + "]", e);
            }
            if (writeShardGens == false) {
                try {
                    shardContainer.deleteBlobsIgnoringIfNotExists(blobsToDelete);
                } catch (IOException e) {
                    LOGGER.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete old index-N blobs during finalization", snapshotId, shardId), e);
                }
            }
            snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis(), indexGeneration);
            listener.onResponse(indexGeneration);
        }, listener::onFailure);
        if (indexIncrementalFileCount == 0) {
            allFilesUploadedListener.onResponse(Collections.emptyList());
            return;
        }
        final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
        int maximumPoolSize = executor instanceof ThreadPoolExecutor ? ((ThreadPoolExecutor) executor).getMaximumPoolSize() : 1;
        // Start at most as many workers as will fit into the snapshot pool at once
        final int workers = Math.min(maximumPoolSize, indexIncrementalFileCount);
        final ActionListener<Void> filesListener = fileQueueListener(filesToSnapshot, workers, allFilesUploadedListener);
        for (int i = 0; i < workers; ++i) {
            executor.execute(ActionRunnable.run(filesListener, () -> {
                BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = filesToSnapshot.poll(0L, TimeUnit.MILLISECONDS);
                if (snapshotFileInfo != null) {
                    store.incRef();
                    try {
                        do {
                            snapshotFile(snapshotFileInfo, indexId, shardId, snapshotId, snapshotStatus, store);
                            snapshotFileInfo = filesToSnapshot.poll(0L, TimeUnit.MILLISECONDS);
                        } while (snapshotFileInfo != null);
                    } finally {
                        store.decRef();
                    }
                }
            }));
        }
    } catch (Exception e) {
        listener.onFailure(e);
    }
}
Also used : IndexShardSnapshotStatus(org.elasticsearch.index.snapshots.IndexShardSnapshotStatus) ShardId(org.elasticsearch.index.shard.ShardId) SnapshotFiles(org.elasticsearch.index.snapshots.blobstore.SnapshotFiles) IndexShardSnapshotFailedException(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException) ByteSizeUnit(org.elasticsearch.common.unit.ByteSizeUnit) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) IndexMetadata(org.elasticsearch.cluster.metadata.IndexMetadata) AllocationService(org.elasticsearch.cluster.routing.allocation.AllocationService) ClusterState(org.elasticsearch.cluster.ClusterState) ClusterStateUpdateTask(org.elasticsearch.cluster.ClusterStateUpdateTask) Map(java.util.Map) BlobContainer(org.elasticsearch.common.blobstore.BlobContainer) RateLimitingInputStream(org.elasticsearch.index.snapshots.blobstore.RateLimitingInputStream) IOContext(org.apache.lucene.store.IOContext) InvalidArgumentException(io.crate.exceptions.InvalidArgumentException) SnapshotDeletionsInProgress(org.elasticsearch.cluster.SnapshotDeletionsInProgress) UUIDs(org.elasticsearch.common.UUIDs) Set(java.util.Set) BlockingQueue(java.util.concurrent.BlockingQueue) StandardCharsets(java.nio.charset.StandardCharsets) AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) Stream(java.util.stream.Stream) Logger(org.apache.logging.log4j.Logger) InputStreamIndexInput(org.elasticsearch.common.lucene.store.InputStreamIndexInput) BlobStore(org.elasticsearch.common.blobstore.BlobStore) SnapshotException(org.elasticsearch.snapshots.SnapshotException) FileInfo.canonicalName(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName) IndexCommit(org.apache.lucene.index.IndexCommit) XContentFactory(org.elasticsearch.common.xcontent.XContentFactory) SnapshotId(org.elasticsearch.snapshots.SnapshotId) Tuple(io.crate.common.collections.Tuple) ShardGenerations(org.elasticsearch.repositories.ShardGenerations) ClusterService(org.elasticsearch.cluster.service.ClusterService) SnapshotShardFailure(org.elasticsearch.snapshots.SnapshotShardFailure) BytesStreamOutput(org.elasticsearch.common.io.stream.BytesStreamOutput) LoggingDeprecationHandler(org.elasticsearch.common.xcontent.LoggingDeprecationHandler) ArrayList(java.util.ArrayList) BytesArray(org.elasticsearch.common.bytes.BytesArray) Metadata(org.elasticsearch.cluster.metadata.Metadata) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) Store(org.elasticsearch.index.store.Store) Nullable(javax.annotation.Nullable) LongStream(java.util.stream.LongStream) IndexInput(org.apache.lucene.store.IndexInput) SetOnce(org.apache.lucene.util.SetOnce) Executor(java.util.concurrent.Executor) IOException(java.io.IOException) XContentParser(org.elasticsearch.common.xcontent.XContentParser) AtomicLong(java.util.concurrent.atomic.AtomicLong) CounterMetric(org.elasticsearch.common.metrics.CounterMetric) ActionListener(org.elasticsearch.action.ActionListener) FsBlobContainer(org.elasticsearch.common.blobstore.fs.FsBlobContainer) SnapshotMissingException(org.elasticsearch.snapshots.SnapshotMissingException) NoSuchFileException(java.nio.file.NoSuchFileException) ConcurrentSnapshotExecutionException(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException) SnapshotInfo(org.elasticsearch.snapshots.SnapshotInfo) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) StoreFileMetadata(org.elasticsearch.index.store.StoreFileMetadata) 
RepositoryMetadata(org.elasticsearch.cluster.metadata.RepositoryMetadata) Settings(org.elasticsearch.common.settings.Settings) Locale(java.util.Locale) Streams(org.elasticsearch.common.io.Streams) ThreadPool(org.elasticsearch.threadpool.ThreadPool) IndexShardRestoreFailedException(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException) ActionRunnable(org.elasticsearch.action.ActionRunnable) StepListener(org.elasticsearch.action.StepListener) NamedXContentRegistry(org.elasticsearch.common.xcontent.NamedXContentRegistry) RepositoryException(org.elasticsearch.repositories.RepositoryException) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) NotXContentException(org.elasticsearch.common.compress.NotXContentException) Setting(org.elasticsearch.common.settings.Setting) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) BlobMetadata(org.elasticsearch.common.blobstore.BlobMetadata) BytesReference(org.elasticsearch.common.bytes.BytesReference) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Collectors(java.util.stream.Collectors) IndexShardSnapshotException(org.elasticsearch.index.snapshots.IndexShardSnapshotException) MapperService(org.elasticsearch.index.mapper.MapperService) List(java.util.List) BlobStoreIndexShardSnapshot(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot) Version(org.elasticsearch.Version) RecoveryState(org.elasticsearch.indices.recovery.RecoveryState) RepositoryData(org.elasticsearch.repositories.RepositoryData) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) XContentType(org.elasticsearch.common.xcontent.XContentType) Index(org.elasticsearch.index.Index) Lucene(org.elasticsearch.common.lucene.Lucene) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) IndexId(org.elasticsearch.repositories.IndexId) FilterInputStream(java.io.FilterInputStream) RepositoriesMetadata(org.elasticsearch.cluster.metadata.RepositoriesMetadata) RepositoryVerificationException(org.elasticsearch.repositories.RepositoryVerificationException) BlobPath(org.elasticsearch.common.blobstore.BlobPath) IndexOutput(org.apache.lucene.store.IndexOutput) Numbers(org.elasticsearch.common.Numbers) Repository(org.elasticsearch.repositories.Repository) SnapshotsService(org.elasticsearch.snapshots.SnapshotsService) GroupedActionListener(org.elasticsearch.action.support.GroupedActionListener) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) AbstractLifecycleComponent(org.elasticsearch.common.component.AbstractLifecycleComponent) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) ExceptionsHelper(org.elasticsearch.ExceptionsHelper) SlicedInputStream(org.elasticsearch.index.snapshots.blobstore.SlicedInputStream) SnapshotsInProgress(org.elasticsearch.cluster.SnapshotsInProgress) BlobStoreIndexShardSnapshots(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots) Collections(java.util.Collections) LogManager(org.apache.logging.log4j.LogManager) RepositoryOperation(org.elasticsearch.repositories.RepositoryOperation) Snapshot(org.elasticsearch.snapshots.Snapshot) RateLimiter(org.apache.lucene.store.RateLimiter) InputStream(java.io.InputStream)
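The fileQueueListener helper referenced above is not part of this excerpt. Its job is to let the N workers report into a single listener that fires once all of them are done, and to drain the queue on the first failure so the remaining workers stop picking up files. One plausible shape, using only the ActionListener interface (a sketch, not the project's actual implementation):

import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot;

// Sketch only: fan-in listener for the upload workers above.
static ActionListener<Void> fileQueueListener(BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> files,
                                              int workers,
                                              ActionListener<Collection<Void>> allDone) {
    AtomicInteger pending = new AtomicInteger(workers);
    AtomicBoolean failed = new AtomicBoolean();
    return new ActionListener<Void>() {
        @Override
        public void onResponse(Void unused) {
            // Each worker responds exactly once; fire downstream when the last one finishes.
            if (pending.decrementAndGet() == 0 && failed.get() == false) {
                allDone.onResponse(Collections.emptyList());
            }
        }

        @Override
        public void onFailure(Exception e) {
            files.clear(); // stop the other workers from taking more files
            if (failed.compareAndSet(false, true)) {
                allDone.onFailure(e);
            }
        }
    };
}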

Example 18 with MapperService

Use of org.elasticsearch.index.mapper.MapperService in project crate by crate.

From class InternalEngineTests, the test method testForceMergeWithSoftDeletesRetention:

@Test
public void testForceMergeWithSoftDeletesRetention() throws Exception {
    final long retainedExtraOps = randomLongBetween(0, 10);
    Settings.Builder settings = Settings.builder().put(defaultSettings.getSettings()).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), retainedExtraOps);
    final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build();
    final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata);
    final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
    final MapperService mapperService = createMapperService("test");
    final Set<String> liveDocs = new HashSet<>();
    try (Store store = createStore();
        InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get))) {
        int numDocs = scaledRandomIntBetween(10, 100);
        for (int i = 0; i < numDocs; i++) {
            ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null);
            engine.index(indexForDoc(doc));
            liveDocs.add(doc.id());
        }
        for (int i = 0; i < numDocs; i++) {
            ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null);
            if (randomBoolean()) {
                engine.delete(new Engine.Delete(doc.id(), newUid(doc.id()), primaryTerm.get()));
                liveDocs.remove(doc.id());
            }
            if (randomBoolean()) {
                engine.index(indexForDoc(doc));
                liveDocs.add(doc.id());
            }
            if (randomBoolean()) {
                engine.flush(randomBoolean(), true);
            }
        }
        engine.flush();
        long localCheckpoint = engine.getProcessedLocalCheckpoint();
        globalCheckpoint.set(randomLongBetween(0, localCheckpoint));
        engine.syncTranslog();
        final long safeCommitCheckpoint;
        try (Engine.IndexCommitRef safeCommit = engine.acquireSafeIndexCommit()) {
            safeCommitCheckpoint = Long.parseLong(safeCommit.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
        }
        engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID());
        assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService);
        Map<Long, Translog.Operation> ops = readAllOperationsInLucene(engine, mapperService).stream().collect(Collectors.toMap(Translog.Operation::seqNo, Function.identity()));
        for (long seqno = 0; seqno <= localCheckpoint; seqno++) {
            long minSeqNoToRetain = Math.min(globalCheckpoint.get() + 1 - retainedExtraOps, safeCommitCheckpoint + 1);
            String msg = "seq# [" + seqno + "], global checkpoint [" + globalCheckpoint + "], retained-ops [" + retainedExtraOps + "]";
            if (seqno < minSeqNoToRetain) {
                Translog.Operation op = ops.get(seqno);
                if (op != null) {
                    assertThat(op, instanceOf(Translog.Index.class));
                    assertThat(msg, ((Translog.Index) op).id(), isIn(liveDocs));
                    assertEquals(msg, ((Translog.Index) op).source(), B_1);
                }
            } else {
                assertThat(msg, ops.get(seqno), notNullValue());
            }
        }
        settings.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0);
        indexSettings.updateIndexMetadata(IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build());
        engine.onSettingsChanged(indexSettings.getTranslogRetentionAge(), indexSettings.getTranslogRetentionSize(), indexSettings.getSoftDeleteRetentionOperations());
        globalCheckpoint.set(localCheckpoint);
        engine.syncTranslog();
        engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID());
        assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService);
        assertThat(readAllOperationsInLucene(engine, mapperService), hasSize(liveDocs.size()));
    }
}
Also used : IndexSettings(org.elasticsearch.index.IndexSettings) Store(org.elasticsearch.index.store.Store) Matchers.containsString(org.hamcrest.Matchers.containsString) LongPoint(org.apache.lucene.document.LongPoint) TestTranslog(org.elasticsearch.index.translog.TestTranslog) Translog(org.elasticsearch.index.translog.Translog) AtomicLong(java.util.concurrent.atomic.AtomicLong) ParsedDocument(org.elasticsearch.index.mapper.ParsedDocument) IndexMetadata(org.elasticsearch.cluster.metadata.IndexMetadata) Settings(org.elasticsearch.common.settings.Settings) MapperService(org.elasticsearch.index.mapper.MapperService) HashSet(java.util.HashSet) Test(org.junit.Test)
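The assertions in the loop above encode a retention rule that is easier to see as a standalone predicate. A sketch of that rule (the helper name is ours, for illustration only):

// Sketch: an operation must survive a force-merge with soft-deletes retention
// if its seqNo is at or above the retention floor, which is the lower of
// (globalCheckpoint + 1 - retainedExtraOps) and (safeCommitCheckpoint + 1).
static boolean mustRetain(long seqNo, long globalCheckpoint, long retainedExtraOps, long safeCommitCheckpoint) {
    long minSeqNoToRetain = Math.min(globalCheckpoint + 1 - retainedExtraOps, safeCommitCheckpoint + 1);
    return seqNo >= minSeqNoToRetain;
}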

Example 19 with MapperService

Use of org.elasticsearch.index.mapper.MapperService in project elasticsearch by elastic.

From class MetaDataCreateIndexService, the method onlyCreateIndex:

private void onlyCreateIndex(final CreateIndexClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
    Settings.Builder updatedSettingsBuilder = Settings.builder();
    updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
    indexScopedSettings.validate(updatedSettingsBuilder);
    request.settings(updatedSettingsBuilder.build());
    clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]", new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, wrapPreservingContext(listener)) {

        @Override
        protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
            return new ClusterStateUpdateResponse(acknowledged);
        }

        @Override
        public ClusterState execute(ClusterState currentState) throws Exception {
            Index createdIndex = null;
            String removalExtraInfo = null;
            IndexRemovalReason removalReason = IndexRemovalReason.FAILURE;
            try {
                validate(request, currentState);
                for (Alias alias : request.aliases()) {
                    aliasValidator.validateAlias(alias, request.index(), currentState.metaData());
                }
                // we only find a template when it's an API call (a new index)
                // find templates, highest order are better matching
                List<IndexTemplateMetaData> templates = findTemplates(request, currentState);
                Map<String, Custom> customs = new HashMap<>();
                // add the request mapping
                Map<String, Map<String, Object>> mappings = new HashMap<>();
                Map<String, AliasMetaData> templatesAliases = new HashMap<>();
                List<String> templateNames = new ArrayList<>();
                for (Map.Entry<String, String> entry : request.mappings().entrySet()) {
                    mappings.put(entry.getKey(), MapperService.parseMapping(xContentRegistry, entry.getValue()));
                }
                for (Map.Entry<String, Custom> entry : request.customs().entrySet()) {
                    customs.put(entry.getKey(), entry.getValue());
                }
                // apply templates, merging the mappings into the request mapping if exists
                for (IndexTemplateMetaData template : templates) {
                    templateNames.add(template.getName());
                    for (ObjectObjectCursor<String, CompressedXContent> cursor : template.mappings()) {
                        String mappingString = cursor.value.string();
                        if (mappings.containsKey(cursor.key)) {
                            XContentHelper.mergeDefaults(mappings.get(cursor.key), MapperService.parseMapping(xContentRegistry, mappingString));
                        } else {
                            mappings.put(cursor.key, MapperService.parseMapping(xContentRegistry, mappingString));
                        }
                    }
                    // handle custom
                    for (ObjectObjectCursor<String, Custom> cursor : template.customs()) {
                        String type = cursor.key;
                        IndexMetaData.Custom custom = cursor.value;
                        IndexMetaData.Custom existing = customs.get(type);
                        if (existing == null) {
                            customs.put(type, custom);
                        } else {
                            IndexMetaData.Custom merged = existing.mergeWith(custom);
                            customs.put(type, merged);
                        }
                    }
                    //handle aliases
                    for (ObjectObjectCursor<String, AliasMetaData> cursor : template.aliases()) {
                        AliasMetaData aliasMetaData = cursor.value;
                        // ignore this one taken from the index template
                        if (request.aliases().contains(new Alias(aliasMetaData.alias()))) {
                            continue;
                        }
                        //if an alias with same name was already processed, ignore this one
                        if (templatesAliases.containsKey(cursor.key)) {
                            continue;
                        }
                        //Allow templatesAliases to be templated by replacing a token with the name of the index that we are applying it to
                        if (aliasMetaData.alias().contains("{index}")) {
                            String templatedAlias = aliasMetaData.alias().replace("{index}", request.index());
                            aliasMetaData = AliasMetaData.newAliasMetaData(aliasMetaData, templatedAlias);
                        }
                        aliasValidator.validateAliasMetaData(aliasMetaData, request.index(), currentState.metaData());
                        templatesAliases.put(aliasMetaData.alias(), aliasMetaData);
                    }
                }
                Settings.Builder indexSettingsBuilder = Settings.builder();
                // apply templates, here, in reverse order, since first ones are better matching
                for (int i = templates.size() - 1; i >= 0; i--) {
                    indexSettingsBuilder.put(templates.get(i).settings());
                }
                // now, put the request settings, so they override templates
                indexSettingsBuilder.put(request.settings());
                if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) {
                    indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 5));
                }
                if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) {
                    indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1));
                }
                if (settings.get(SETTING_AUTO_EXPAND_REPLICAS) != null && indexSettingsBuilder.get(SETTING_AUTO_EXPAND_REPLICAS) == null) {
                    indexSettingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, settings.get(SETTING_AUTO_EXPAND_REPLICAS));
                }
                if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) {
                    DiscoveryNodes nodes = currentState.nodes();
                    final Version createdVersion = Version.min(Version.CURRENT, nodes.getSmallestNonClientNodeVersion());
                    indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion);
                }
                if (indexSettingsBuilder.get(SETTING_CREATION_DATE) == null) {
                    indexSettingsBuilder.put(SETTING_CREATION_DATE, new DateTime(DateTimeZone.UTC).getMillis());
                }
                indexSettingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, request.getProvidedName());
                indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID());
                final Index shrinkFromIndex = request.shrinkFrom();
                int routingNumShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(indexSettingsBuilder.build());
                if (shrinkFromIndex != null) {
                    prepareShrinkIndexSettings(currentState, mappings.keySet(), indexSettingsBuilder, shrinkFromIndex, request.index());
                    IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(shrinkFromIndex);
                    routingNumShards = sourceMetaData.getRoutingNumShards();
                }
                Settings actualIndexSettings = indexSettingsBuilder.build();
                IndexMetaData.Builder tmpImdBuilder = IndexMetaData.builder(request.index()).setRoutingNumShards(routingNumShards);
                // Set up everything, now locally create the index to see that things are ok, and apply
                final IndexMetaData tmpImd = tmpImdBuilder.settings(actualIndexSettings).build();
                ActiveShardCount waitForActiveShards = request.waitForActiveShards();
                if (waitForActiveShards == ActiveShardCount.DEFAULT) {
                    waitForActiveShards = tmpImd.getWaitForActiveShards();
                }
                if (waitForActiveShards.validate(tmpImd.getNumberOfReplicas()) == false) {
                    throw new IllegalArgumentException("invalid wait_for_active_shards[" + request.waitForActiveShards() + "]: cannot be greater than number of shard copies [" + (tmpImd.getNumberOfReplicas() + 1) + "]");
                }
                // create the index here (on the master) to validate it can be created, as well as adding the mapping
                final IndexService indexService = indicesService.createIndex(tmpImd, Collections.emptyList(), shardId -> {
                });
                createdIndex = indexService.index();
                // now add the mappings
                MapperService mapperService = indexService.mapperService();
                try {
                    mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, request.updateAllTypes());
                } catch (Exception e) {
                    removalExtraInfo = "failed on parsing default mapping/mappings on index creation";
                    throw e;
                }
                // the context is only used for validation so it's fine to pass fake values for the shard id and the current
                // timestamp
                final QueryShardContext queryShardContext = indexService.newQueryShardContext(0, null, () -> 0L);
                for (Alias alias : request.aliases()) {
                    if (Strings.hasLength(alias.filter())) {
                        aliasValidator.validateAliasFilter(alias.name(), alias.filter(), queryShardContext, xContentRegistry);
                    }
                }
                for (AliasMetaData aliasMetaData : templatesAliases.values()) {
                    if (aliasMetaData.filter() != null) {
                        aliasValidator.validateAliasFilter(aliasMetaData.alias(), aliasMetaData.filter().uncompressed(), queryShardContext, xContentRegistry);
                    }
                }
                // now, update the mappings with the actual source
                Map<String, MappingMetaData> mappingsMetaData = new HashMap<>();
                for (DocumentMapper mapper : mapperService.docMappers(true)) {
                    MappingMetaData mappingMd = new MappingMetaData(mapper);
                    mappingsMetaData.put(mapper.type(), mappingMd);
                }
                final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(request.index()).settings(actualIndexSettings).setRoutingNumShards(routingNumShards);
                for (MappingMetaData mappingMd : mappingsMetaData.values()) {
                    indexMetaDataBuilder.putMapping(mappingMd);
                }
                for (AliasMetaData aliasMetaData : templatesAliases.values()) {
                    indexMetaDataBuilder.putAlias(aliasMetaData);
                }
                for (Alias alias : request.aliases()) {
                    AliasMetaData aliasMetaData = AliasMetaData.builder(alias.name()).filter(alias.filter()).indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build();
                    indexMetaDataBuilder.putAlias(aliasMetaData);
                }
                for (Map.Entry<String, Custom> customEntry : customs.entrySet()) {
                    indexMetaDataBuilder.putCustom(customEntry.getKey(), customEntry.getValue());
                }
                indexMetaDataBuilder.state(request.state());
                final IndexMetaData indexMetaData;
                try {
                    indexMetaData = indexMetaDataBuilder.build();
                } catch (Exception e) {
                    removalExtraInfo = "failed to build index metadata";
                    throw e;
                }
                indexService.getIndexEventListener().beforeIndexAddedToCluster(indexMetaData.getIndex(), indexMetaData.getSettings());
                MetaData newMetaData = MetaData.builder(currentState.metaData()).put(indexMetaData, false).build();
                String maybeShadowIndicator = indexMetaData.isIndexUsingShadowReplicas() ? "s" : "";
                logger.info("[{}] creating index, cause [{}], templates {}, shards [{}]/[{}{}], mappings {}", request.index(), request.cause(), templateNames, indexMetaData.getNumberOfShards(), indexMetaData.getNumberOfReplicas(), maybeShadowIndicator, mappings.keySet());
                ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
                if (!request.blocks().isEmpty()) {
                    for (ClusterBlock block : request.blocks()) {
                        blocks.addIndexBlock(request.index(), block);
                    }
                }
                blocks.updateBlocks(indexMetaData);
                ClusterState updatedState = ClusterState.builder(currentState).blocks(blocks).metaData(newMetaData).build();
                if (request.state() == State.OPEN) {
                    RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable()).addAsNew(updatedState.metaData().index(request.index()));
                    updatedState = allocationService.reroute(ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(), "index [" + request.index() + "] created");
                }
                removalExtraInfo = "cleaning up after validating index on master";
                removalReason = IndexRemovalReason.NO_LONGER_ASSIGNED;
                return updatedState;
            } finally {
                if (createdIndex != null) {
                    // Index was already partially created - need to clean up
                    indicesService.removeIndex(createdIndex, removalReason, removalExtraInfo);
                }
            }
        }

        @Override
        public void onFailure(String source, Exception e) {
            if (e instanceof ResourceAlreadyExistsException) {
                logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
            } else {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
            }
            super.onFailure(source, e);
        }
    });
}
Also used : ElasticsearchException(org.elasticsearch.ElasticsearchException) SETTING_INDEX_UUID(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_INDEX_UUID) DateTimeZone(org.joda.time.DateTimeZone) QueryShardContext(org.elasticsearch.index.query.QueryShardContext) Alias(org.elasticsearch.action.admin.indices.alias.Alias) Environment(org.elasticsearch.env.Environment) BiFunction(java.util.function.BiFunction) AllocationService(org.elasticsearch.cluster.routing.allocation.AllocationService) ClusterBlocks(org.elasticsearch.cluster.block.ClusterBlocks) ObjectObjectCursor(com.carrotsearch.hppc.cursors.ObjectObjectCursor) ClusterState(org.elasticsearch.cluster.ClusterState) Settings(org.elasticsearch.common.settings.Settings) CreateIndexClusterStateUpdateResponse(org.elasticsearch.cluster.ack.CreateIndexClusterStateUpdateResponse) ClusterBlock(org.elasticsearch.cluster.block.ClusterBlock) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) CompressedXContent(org.elasticsearch.common.compress.CompressedXContent) IndexNotFoundException(org.elasticsearch.index.IndexNotFoundException) IndexCreationException(org.elasticsearch.indices.IndexCreationException) Locale(java.util.Locale) Map(java.util.Map) ValidationException(org.elasticsearch.common.ValidationException) ThreadPool(org.elasticsearch.threadpool.ThreadPool) State(org.elasticsearch.cluster.metadata.IndexMetaData.State) Path(java.nio.file.Path) NamedXContentRegistry(org.elasticsearch.common.xcontent.NamedXContentRegistry) CreateIndexClusterStateUpdateRequest(org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest) Priority(org.elasticsearch.common.Priority) Predicate(java.util.function.Predicate) UUIDs(org.elasticsearch.common.UUIDs) Set(java.util.Set) ObjectCursor(com.carrotsearch.hppc.cursors.ObjectCursor) ActiveShardCount(org.elasticsearch.action.support.ActiveShardCount) ContextPreservingActionListener(org.elasticsearch.action.support.ContextPreservingActionListener) MapperService(org.elasticsearch.index.mapper.MapperService) List(java.util.List) Version(org.elasticsearch.Version) IndexRoutingTable(org.elasticsearch.cluster.routing.IndexRoutingTable) Supplier(org.apache.logging.log4j.util.Supplier) ClusterStateUpdateResponse(org.elasticsearch.cluster.ack.ClusterStateUpdateResponse) InvalidIndexNameException(org.elasticsearch.indices.InvalidIndexNameException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) ShardRouting(org.elasticsearch.cluster.routing.ShardRouting) AckedClusterStateUpdateTask(org.elasticsearch.cluster.AckedClusterStateUpdateTask) ClusterService(org.elasticsearch.cluster.service.ClusterService) HashMap(java.util.HashMap) Index(org.elasticsearch.index.Index) ShardRoutingState(org.elasticsearch.cluster.routing.ShardRoutingState) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) ResourceAlreadyExistsException(org.elasticsearch.ResourceAlreadyExistsException) ActiveShardsObserver(org.elasticsearch.action.support.ActiveShardsObserver) Strings(org.elasticsearch.common.Strings) Inject(org.elasticsearch.common.inject.Inject) ArrayList(java.util.ArrayList) SETTING_NUMBER_OF_REPLICAS(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS) XContentHelper(org.elasticsearch.common.xcontent.XContentHelper) IndexRemovalReason(org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason) Custom(org.elasticsearch.cluster.metadata.IndexMetaData.Custom) Regex(org.elasticsearch.common.regex.Regex) 
SETTING_VERSION_CREATED(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED) IndicesService(org.elasticsearch.indices.IndicesService) SETTING_AUTO_EXPAND_REPLICAS(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS) ClusterBlockLevel(org.elasticsearch.cluster.block.ClusterBlockLevel) SETTING_CREATION_DATE(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE) PathUtils(org.elasticsearch.common.io.PathUtils) DocumentMapper(org.elasticsearch.index.mapper.DocumentMapper) DiscoveryNodes(org.elasticsearch.cluster.node.DiscoveryNodes) AbstractComponent(org.elasticsearch.common.component.AbstractComponent) SETTING_NUMBER_OF_SHARDS(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS) IndexService(org.elasticsearch.index.IndexService) DateTime(org.joda.time.DateTime) IOException(java.io.IOException) IndexScopedSettings(org.elasticsearch.common.settings.IndexScopedSettings) CollectionUtil(org.apache.lucene.util.CollectionUtil) RoutingTable(org.elasticsearch.cluster.routing.RoutingTable) MergeReason(org.elasticsearch.index.mapper.MapperService.MergeReason) Comparator(java.util.Comparator) Collections(java.util.Collections) ActionListener(org.elasticsearch.action.ActionListener)
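The reverse-order template loop in the example works because Settings.Builder.put(Settings) overwrites keys that are already present, so the last writer wins per key. A small self-contained illustration (the setting values here are made up):

import org.elasticsearch.common.settings.Settings;

// Lower-priority template first, higher-priority template second, request last:
// each later put() overwrites colliding keys and leaves the rest intact.
static void settingsPrecedenceDemo() {
    Settings lowPriority = Settings.builder()
        .put("index.number_of_shards", 3)
        .put("index.refresh_interval", "30s")
        .build();
    Settings highPriority = Settings.builder()
        .put("index.number_of_shards", 5)
        .build();

    Settings merged = Settings.builder()
        .put(lowPriority)
        .put(highPriority)
        .build();

    assert "5".equals(merged.get("index.number_of_shards"));   // overwritten by the better match
    assert "30s".equals(merged.get("index.refresh_interval")); // untouched key survives
}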

Example 20 with MapperService

Use of org.elasticsearch.index.mapper.MapperService in project elasticsearch by elastic.

From class MetaDataIndexUpgradeService, the method checkMappingsCompatibility:

/**
     * Checks the mappings for compatibility with the current version
     */
private void checkMappingsCompatibility(IndexMetaData indexMetaData) {
    try {
        // We cannot instantiate real analysis server at this point because the node might not have
        // been started yet. However, we don't really need real analyzers at this stage - so we can fake it
        IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings);
        SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
        final NamedAnalyzer fakeDefault = new NamedAnalyzer("fake_default", AnalyzerScope.INDEX, new Analyzer() {

            @Override
            protected TokenStreamComponents createComponents(String fieldName) {
                throw new UnsupportedOperationException("shouldn't be here");
            }
        });
        // this is just a fake map that always returns the same value for any possible string key
        // also the entrySet impl isn't fully correct but we implement it since internally
        // IndexAnalyzers will iterate over all analyzers to close them.
        final Map<String, NamedAnalyzer> analyzerMap = new AbstractMap<String, NamedAnalyzer>() {

            @Override
            public NamedAnalyzer get(Object key) {
                assert key instanceof String : "key must be a string but was: " + key.getClass();
                return new NamedAnalyzer((String) key, AnalyzerScope.INDEX, fakeDefault.analyzer());
            }

            @Override
            public Set<Entry<String, NamedAnalyzer>> entrySet() {
                return Collections.emptySet();
            }
        };
        try (IndexAnalyzers fakeIndexAnalyzers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap, analyzerMap)) {
            MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalyzers, xContentRegistry, similarityService, mapperRegistry, () -> null);
            mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, false);
        }
    } catch (Exception ex) {
        // Wrap the inner exception so we have the index name in the exception message
        throw new IllegalStateException("unable to upgrade the mappings for the index [" + indexMetaData.getIndex() + "]", ex);
    }
}
Also used : NamedAnalyzer(org.elasticsearch.index.analysis.NamedAnalyzer) IndexSettings(org.elasticsearch.index.IndexSettings) Analyzer(org.apache.lucene.analysis.Analyzer) AbstractMap(java.util.AbstractMap) SimilarityService(org.elasticsearch.index.similarity.SimilarityService) IndexAnalyzers(org.elasticsearch.index.analysis.IndexAnalyzers) MapperService(org.elasticsearch.index.mapper.MapperService)
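The anonymous AbstractMap in the example is an instance of a small reusable trick: a read-only Map view that manufactures a value for any key instead of storing entries. A generalized sketch (the fabricating name is ours, not from the codebase):

import java.util.AbstractMap;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;

// Sketch: a Map that synthesizes a value per lookup. entrySet() is empty, so
// iteration-based cleanup (here, IndexAnalyzers closing its analyzers) becomes
// a harmless no-op, which is exactly what the mapping-compatibility check wants.
static <V> Map<String, V> fabricating(Function<String, V> maker) {
    return new AbstractMap<String, V>() {
        @Override
        public V get(Object key) {
            assert key instanceof String : "key must be a string but was: " + key.getClass();
            return maker.apply((String) key);
        }

        @Override
        public Set<Entry<String, V>> entrySet() {
            return Collections.emptySet();
        }
    };
}

With this helper, the example's analyzerMap would read as fabricating(name -> new NamedAnalyzer(name, AnalyzerScope.INDEX, fakeDefault.analyzer())).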

Aggregations

MapperService (org.elasticsearch.index.mapper.MapperService) 46
Settings (org.elasticsearch.common.settings.Settings) 16
DocumentMapper (org.elasticsearch.index.mapper.DocumentMapper) 14
IndexSettings (org.elasticsearch.index.IndexSettings) 13
CompressedXContent (org.elasticsearch.common.compress.CompressedXContent) 12
IOException (java.io.IOException) 10
Store (org.elasticsearch.index.store.Store) 9
Matchers.containsString (org.hamcrest.Matchers.containsString) 9
IndexMetadata (org.elasticsearch.cluster.metadata.IndexMetadata) 8
Index (org.elasticsearch.index.Index) 8
ParsedDocument (org.elasticsearch.index.mapper.ParsedDocument) 8
HashMap (java.util.HashMap) 7
Map (java.util.Map) 7
IndexService (org.elasticsearch.index.IndexService) 7
AtomicLong (java.util.concurrent.atomic.AtomicLong) 6
IndexAnalyzers (org.elasticsearch.index.analysis.IndexAnalyzers) 6
Collections (java.util.Collections) 5
HashSet (java.util.HashSet) 5
List (java.util.List) 5
Analyzer (org.apache.lucene.analysis.Analyzer) 5