Use of org.elasticsearch.index.mapper.MapperService in project crate by crate.
The class MapperTestUtils, method newMapperService.
public static MapperService newMapperService(NamedXContentRegistry xContentRegistry, Path tempDir, Settings settings, IndicesModule indicesModule, String indexName) throws IOException {
Settings.Builder settingsBuilder = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), tempDir).put(settings);
if (settings.get(IndexMetadata.SETTING_VERSION_CREATED) == null) {
settingsBuilder.put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT);
}
Settings finalSettings = settingsBuilder.build();
MapperRegistry mapperRegistry = indicesModule.getMapperRegistry();
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexName, finalSettings);
IndexAnalyzers indexAnalyzers = createTestAnalysis(indexSettings, finalSettings).indexAnalyzers;
return new MapperService(indexSettings, indexAnalyzers, xContentRegistry, mapperRegistry, () -> null);
}
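A minimal usage sketch (hypothetical wiring; CrateDB's actual test scaffolding may construct the registry and module differently) showing how a test could obtain a MapperService from this helper:

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collections;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.IndicesModule;

public class MapperServiceUsageSketch {
    public static void main(String[] args) throws Exception {
        Path tempDir = Files.createTempDirectory("mapper-test");
        // no extra mapper plugins; real tests may pass plugin-provided mappers here
        IndicesModule indicesModule = new IndicesModule(Collections.emptyList());
        MapperService mapperService = MapperTestUtils.newMapperService(
            NamedXContentRegistry.EMPTY, // no custom xcontent parsers needed for a bare service
            tempDir,
            Settings.EMPTY,              // the helper fills in SETTING_VERSION_CREATED when absent
            indicesModule,
            "test_index");
        // the returned mapperService can now parse and merge mappings in a test
    }
}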
Use of org.elasticsearch.index.mapper.MapperService in project crate by crate.
The class BlobStoreRepository, method snapshotShard.
@Override
public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, boolean writeShardGens, ActionListener<String> listener) {
final ShardId shardId = store.shardId();
final long startTime = threadPool.absoluteTimeInMillis();
try {
final String generation = snapshotStatus.generation();
LOGGER.debug("[{}] [{}] snapshot to [{}] [{}] ...", shardId, snapshotId, metadata.name(), generation);
final BlobContainer shardContainer = shardContainer(indexId, shardId);
final Set<String> blobs;
if (generation == null) {
try {
blobs = shardContainer.listBlobsByPrefix(INDEX_FILE_PREFIX).keySet();
} catch (IOException e) {
throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e);
}
} else {
blobs = Collections.singleton(INDEX_FILE_PREFIX + generation);
}
Tuple<BlobStoreIndexShardSnapshots, String> tuple = buildBlobStoreIndexShardSnapshots(blobs, shardContainer, generation);
BlobStoreIndexShardSnapshots snapshots = tuple.v1();
final String fileListGeneration = tuple.v2();
if (snapshots.snapshots().stream().anyMatch(sf -> sf.snapshot().equals(snapshotId.getName()))) {
throw new IndexShardSnapshotFailedException(shardId, "Duplicate snapshot name [" + snapshotId.getName() + "] detected, aborting");
}
final List<BlobStoreIndexShardSnapshot.FileInfo> indexCommitPointFiles = new ArrayList<>();
final BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> filesToSnapshot = new LinkedBlockingQueue<>();
store.incRef();
final Collection<String> fileNames;
final Store.MetadataSnapshot metadataFromStore;
try {
// TODO apparently we don't use MetadataSnapshot#recoveryDiff(...) here but we should
try {
LOGGER.trace("[{}] [{}] Loading store metadata using index commit [{}]", shardId, snapshotId, snapshotIndexCommit);
metadataFromStore = store.getMetadata(snapshotIndexCommit);
fileNames = snapshotIndexCommit.getFileNames();
} catch (IOException e) {
throw new IndexShardSnapshotFailedException(shardId, "Failed to get store file metadata", e);
}
} finally {
store.decRef();
}
int indexIncrementalFileCount = 0;
int indexTotalNumberOfFiles = 0;
long indexIncrementalSize = 0;
long indexTotalFileCount = 0;
for (String fileName : fileNames) {
if (snapshotStatus.isAborted()) {
LOGGER.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName);
throw new IndexShardSnapshotFailedException(shardId, "Aborted");
}
LOGGER.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName);
final StoreFileMetadata md = metadataFromStore.get(fileName);
BlobStoreIndexShardSnapshot.FileInfo existingFileInfo = null;
List<BlobStoreIndexShardSnapshot.FileInfo> filesInfo = snapshots.findPhysicalIndexFiles(fileName);
if (filesInfo != null) {
for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) {
if (fileInfo.isSame(md)) {
// a commit point file with the same name, size and checksum was already copied to repository
// we will reuse it for this snapshot
existingFileInfo = fileInfo;
break;
}
}
}
indexTotalFileCount += md.length();
indexTotalNumberOfFiles++;
if (existingFileInfo == null) {
indexIncrementalFileCount++;
indexIncrementalSize += md.length();
// create a new FileInfo
BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = new BlobStoreIndexShardSnapshot.FileInfo(DATA_BLOB_PREFIX + UUIDs.randomBase64UUID(), md, chunkSize());
indexCommitPointFiles.add(snapshotFileInfo);
filesToSnapshot.add(snapshotFileInfo);
} else {
indexCommitPointFiles.add(existingFileInfo);
}
}
snapshotStatus.moveToStarted(startTime, indexIncrementalFileCount, indexTotalNumberOfFiles, indexIncrementalSize, indexTotalFileCount);
assert indexIncrementalFileCount == filesToSnapshot.size();
final StepListener<Collection<Void>> allFilesUploadedListener = new StepListener<>();
allFilesUploadedListener.whenComplete(v -> {
final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration());
// now create and write the commit point
final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(), lastSnapshotStatus.getIndexVersion(), indexCommitPointFiles, lastSnapshotStatus.getStartTime(), threadPool.absoluteTimeInMillis() - lastSnapshotStatus.getStartTime(), lastSnapshotStatus.getIncrementalFileCount(), lastSnapshotStatus.getIncrementalSize());
LOGGER.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId);
try {
indexShardSnapshotFormat.write(snapshot, shardContainer, snapshotId.getUUID(), false);
} catch (IOException e) {
throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e);
}
// build a new BlobStoreIndexShardSnapshots that includes this snapshot and all previously saved ones
List<SnapshotFiles> newSnapshotsList = new ArrayList<>();
newSnapshotsList.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles()));
for (SnapshotFiles point : snapshots) {
newSnapshotsList.add(point);
}
final List<String> blobsToDelete;
final String indexGeneration;
if (writeShardGens) {
indexGeneration = UUIDs.randomBase64UUID();
blobsToDelete = Collections.emptyList();
} else {
indexGeneration = Long.toString(Long.parseLong(fileListGeneration) + 1);
// Delete all previous index-N blobs
blobsToDelete = blobs.stream().filter(blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX)).collect(Collectors.toList());
assert blobsToDelete.stream().mapToLong(b -> Long.parseLong(b.replaceFirst(SNAPSHOT_INDEX_PREFIX, ""))).max().orElse(-1L) < Long.parseLong(indexGeneration) : "Tried to delete an index-N blob newer than the current generation [" + indexGeneration + "] when deleting index-N blobs " + blobsToDelete;
}
try {
writeShardIndexBlob(shardContainer, indexGeneration, new BlobStoreIndexShardSnapshots(newSnapshotsList));
} catch (IOException e) {
throw new IndexShardSnapshotFailedException(shardId, "Failed to finalize snapshot creation [" + snapshotId + "] with shard index [" + indexShardSnapshotsFormat.blobName(indexGeneration) + "]", e);
}
if (writeShardGens == false) {
try {
shardContainer.deleteBlobsIgnoringIfNotExists(blobsToDelete);
} catch (IOException e) {
LOGGER.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete old index-N blobs during finalization", snapshotId, shardId), e);
}
}
snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis(), indexGeneration);
listener.onResponse(indexGeneration);
}, listener::onFailure);
if (indexIncrementalFileCount == 0) {
allFilesUploadedListener.onResponse(Collections.emptyList());
return;
}
final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
int maximumPoolSize = executor instanceof ThreadPoolExecutor ? ((ThreadPoolExecutor) executor).getMaximumPoolSize() : 1;
// Start at most as many workers as fit into the snapshot pool at once
final int workers = Math.min(maximumPoolSize, indexIncrementalFileCount);
final ActionListener<Void> filesListener = fileQueueListener(filesToSnapshot, workers, allFilesUploadedListener);
for (int i = 0; i < workers; ++i) {
executor.execute(ActionRunnable.run(filesListener, () -> {
BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = filesToSnapshot.poll(0L, TimeUnit.MILLISECONDS);
if (snapshotFileInfo != null) {
store.incRef();
try {
do {
snapshotFile(snapshotFileInfo, indexId, shardId, snapshotId, snapshotStatus, store);
snapshotFileInfo = filesToSnapshot.poll(0L, TimeUnit.MILLISECONDS);
} while (snapshotFileInfo != null);
} finally {
store.decRef();
}
}
}));
}
} catch (Exception e) {
listener.onFailure(e);
}
}
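The upload loop above has each worker poll the shared queue with a zero timeout and exit once the queue is drained. A standalone sketch of that pattern, assuming an invented uploadFile placeholder in place of snapshotFile(...):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class QueueDrainSketch {
    public static void main(String[] args) throws Exception {
        BlockingQueue<String> filesToSnapshot = new LinkedBlockingQueue<>();
        for (int i = 0; i < 10; i++) {
            filesToSnapshot.add("file-" + i);
        }
        int workers = Math.min(3, filesToSnapshot.size()); // no more workers than files
        ExecutorService executor = Executors.newFixedThreadPool(workers);
        CountDownLatch done = new CountDownLatch(workers);
        for (int i = 0; i < workers; i++) {
            executor.execute(() -> {
                try {
                    String file;
                    // poll(0, MILLISECONDS) returns null immediately once the queue is empty
                    while ((file = filesToSnapshot.poll(0L, TimeUnit.MILLISECONDS)) != null) {
                        uploadFile(file); // stands in for snapshotFile(fileInfo, indexId, shardId, ...)
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                } finally {
                    done.countDown();
                }
            });
        }
        done.await();
        executor.shutdown();
    }

    private static void uploadFile(String file) {
        System.out.println("uploaded " + file);
    }
}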
Use of org.elasticsearch.index.mapper.MapperService in project crate by crate.
The class InternalEngineTests, method testForceMergeWithSoftDeletesRetention.
@Test
public void testForceMergeWithSoftDeletesRetention() throws Exception {
final long retainedExtraOps = randomLongBetween(0, 10);
Settings.Builder settings = Settings.builder().put(defaultSettings.getSettings()).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), retainedExtraOps);
final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build();
final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata);
final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
final MapperService mapperService = createMapperService("test");
final Set<String> liveDocs = new HashSet<>();
try (Store store = createStore();
InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get))) {
int numDocs = scaledRandomIntBetween(10, 100);
for (int i = 0; i < numDocs; i++) {
ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null);
engine.index(indexForDoc(doc));
liveDocs.add(doc.id());
}
for (int i = 0; i < numDocs; i++) {
ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null);
if (randomBoolean()) {
engine.delete(new Engine.Delete(doc.id(), newUid(doc.id()), primaryTerm.get()));
liveDocs.remove(doc.id());
}
if (randomBoolean()) {
engine.index(indexForDoc(doc));
liveDocs.add(doc.id());
}
if (randomBoolean()) {
engine.flush(randomBoolean(), true);
}
}
engine.flush();
long localCheckpoint = engine.getProcessedLocalCheckpoint();
globalCheckpoint.set(randomLongBetween(0, localCheckpoint));
engine.syncTranslog();
final long safeCommitCheckpoint;
try (Engine.IndexCommitRef safeCommit = engine.acquireSafeIndexCommit()) {
safeCommitCheckpoint = Long.parseLong(safeCommit.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
}
engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID());
assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService);
Map<Long, Translog.Operation> ops = readAllOperationsInLucene(engine, mapperService).stream().collect(Collectors.toMap(Translog.Operation::seqNo, Function.identity()));
for (long seqno = 0; seqno <= localCheckpoint; seqno++) {
long minSeqNoToRetain = Math.min(globalCheckpoint.get() + 1 - retainedExtraOps, safeCommitCheckpoint + 1);
String msg = "seq# [" + seqno + "], global checkpoint [" + globalCheckpoint + "], retained-ops [" + retainedExtraOps + "]";
if (seqno < minSeqNoToRetain) {
Translog.Operation op = ops.get(seqno);
if (op != null) {
assertThat(op, instanceOf(Translog.Index.class));
assertThat(msg, ((Translog.Index) op).id(), isIn(liveDocs));
assertEquals(msg, ((Translog.Index) op).source(), B_1);
}
} else {
assertThat(msg, ops.get(seqno), notNullValue());
}
}
settings.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0);
indexSettings.updateIndexMetadata(IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build());
engine.onSettingsChanged(indexSettings.getTranslogRetentionAge(), indexSettings.getTranslogRetentionSize(), indexSettings.getSoftDeleteRetentionOperations());
globalCheckpoint.set(localCheckpoint);
engine.syncTranslog();
engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID());
assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService);
assertThat(readAllOperationsInLucene(engine, mapperService), hasSize(liveDocs.size()));
}
}
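The boundary asserted in this test is minSeqNoToRetain = min(globalCheckpoint + 1 - retainedExtraOps, safeCommitCheckpoint + 1). A small worked example with invented values:

public class RetentionBoundExample {
    public static void main(String[] args) {
        // all values invented for illustration
        long globalCheckpoint = 42;
        long retainedExtraOps = 5;
        long safeCommitCheckpoint = 40;
        long minSeqNoToRetain = Math.min(globalCheckpoint + 1 - retainedExtraOps, safeCommitCheckpoint + 1);
        System.out.println(minSeqNoToRetain); // min(38, 41) = 38
        // seq#s below 38 may have been merged away, with only operations on live docs surviving;
        // every seq# from 38 up to the local checkpoint must still be readable from Lucene
    }
}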
Use of org.elasticsearch.index.mapper.MapperService in project elasticsearch by elastic.
The class MetaDataCreateIndexService, method onlyCreateIndex.
private void onlyCreateIndex(final CreateIndexClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
Settings.Builder updatedSettingsBuilder = Settings.builder();
updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
indexScopedSettings.validate(updatedSettingsBuilder);
request.settings(updatedSettingsBuilder.build());
clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]", new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, wrapPreservingContext(listener)) {
@Override
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
return new ClusterStateUpdateResponse(acknowledged);
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
Index createdIndex = null;
String removalExtraInfo = null;
IndexRemovalReason removalReason = IndexRemovalReason.FAILURE;
try {
validate(request, currentState);
for (Alias alias : request.aliases()) {
aliasValidator.validateAlias(alias, request.index(), currentState.metaData());
}
// we only find a template when it's an API call (a new index)
// find templates, highest order are better matching
List<IndexTemplateMetaData> templates = findTemplates(request, currentState);
Map<String, Custom> customs = new HashMap<>();
// add the request mapping
Map<String, Map<String, Object>> mappings = new HashMap<>();
Map<String, AliasMetaData> templatesAliases = new HashMap<>();
List<String> templateNames = new ArrayList<>();
for (Map.Entry<String, String> entry : request.mappings().entrySet()) {
mappings.put(entry.getKey(), MapperService.parseMapping(xContentRegistry, entry.getValue()));
}
for (Map.Entry<String, Custom> entry : request.customs().entrySet()) {
customs.put(entry.getKey(), entry.getValue());
}
// apply templates, merging the mappings into the request mapping if exists
for (IndexTemplateMetaData template : templates) {
templateNames.add(template.getName());
for (ObjectObjectCursor<String, CompressedXContent> cursor : template.mappings()) {
String mappingString = cursor.value.string();
if (mappings.containsKey(cursor.key)) {
XContentHelper.mergeDefaults(mappings.get(cursor.key), MapperService.parseMapping(xContentRegistry, mappingString));
} else {
mappings.put(cursor.key, MapperService.parseMapping(xContentRegistry, mappingString));
}
}
// handle custom
for (ObjectObjectCursor<String, Custom> cursor : template.customs()) {
String type = cursor.key;
IndexMetaData.Custom custom = cursor.value;
IndexMetaData.Custom existing = customs.get(type);
if (existing == null) {
customs.put(type, custom);
} else {
IndexMetaData.Custom merged = existing.mergeWith(custom);
customs.put(type, merged);
}
}
//handle aliases
for (ObjectObjectCursor<String, AliasMetaData> cursor : template.aliases()) {
AliasMetaData aliasMetaData = cursor.value;
// ignore this one taken from the index template
if (request.aliases().contains(new Alias(aliasMetaData.alias()))) {
continue;
}
//if an alias with same name was already processed, ignore this one
if (templatesAliases.containsKey(cursor.key)) {
continue;
}
//Allow templatesAliases to be templated by replacing a token with the name of the index that we are applying it to
if (aliasMetaData.alias().contains("{index}")) {
String templatedAlias = aliasMetaData.alias().replace("{index}", request.index());
aliasMetaData = AliasMetaData.newAliasMetaData(aliasMetaData, templatedAlias);
}
aliasValidator.validateAliasMetaData(aliasMetaData, request.index(), currentState.metaData());
templatesAliases.put(aliasMetaData.alias(), aliasMetaData);
}
}
Settings.Builder indexSettingsBuilder = Settings.builder();
// apply templates, here, in reverse order, since first ones are better matching
for (int i = templates.size() - 1; i >= 0; i--) {
indexSettingsBuilder.put(templates.get(i).settings());
}
// now, put the request settings, so they override templates
indexSettingsBuilder.put(request.settings());
if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) {
indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 5));
}
if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) {
indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1));
}
if (settings.get(SETTING_AUTO_EXPAND_REPLICAS) != null && indexSettingsBuilder.get(SETTING_AUTO_EXPAND_REPLICAS) == null) {
indexSettingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, settings.get(SETTING_AUTO_EXPAND_REPLICAS));
}
if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) {
DiscoveryNodes nodes = currentState.nodes();
final Version createdVersion = Version.min(Version.CURRENT, nodes.getSmallestNonClientNodeVersion());
indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion);
}
if (indexSettingsBuilder.get(SETTING_CREATION_DATE) == null) {
indexSettingsBuilder.put(SETTING_CREATION_DATE, new DateTime(DateTimeZone.UTC).getMillis());
}
indexSettingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, request.getProvidedName());
indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID());
final Index shrinkFromIndex = request.shrinkFrom();
int routingNumShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(indexSettingsBuilder.build());
if (shrinkFromIndex != null) {
prepareShrinkIndexSettings(currentState, mappings.keySet(), indexSettingsBuilder, shrinkFromIndex, request.index());
IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(shrinkFromIndex);
routingNumShards = sourceMetaData.getRoutingNumShards();
}
Settings actualIndexSettings = indexSettingsBuilder.build();
IndexMetaData.Builder tmpImdBuilder = IndexMetaData.builder(request.index()).setRoutingNumShards(routingNumShards);
// Set up everything, now locally create the index to see that things are ok, and apply
final IndexMetaData tmpImd = tmpImdBuilder.settings(actualIndexSettings).build();
ActiveShardCount waitForActiveShards = request.waitForActiveShards();
if (waitForActiveShards == ActiveShardCount.DEFAULT) {
waitForActiveShards = tmpImd.getWaitForActiveShards();
}
if (waitForActiveShards.validate(tmpImd.getNumberOfReplicas()) == false) {
throw new IllegalArgumentException("invalid wait_for_active_shards[" + request.waitForActiveShards() + "]: cannot be greater than number of shard copies [" + (tmpImd.getNumberOfReplicas() + 1) + "]");
}
// create the index here (on the master) to validate it can be created, as well as adding the mapping
final IndexService indexService = indicesService.createIndex(tmpImd, Collections.emptyList(), shardId -> {
});
createdIndex = indexService.index();
// now add the mappings
MapperService mapperService = indexService.mapperService();
try {
mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, request.updateAllTypes());
} catch (Exception e) {
removalExtraInfo = "failed on parsing default mapping/mappings on index creation";
throw e;
}
// the context is only used for validation so it's fine to pass fake values for the shard id and the current
// timestamp
final QueryShardContext queryShardContext = indexService.newQueryShardContext(0, null, () -> 0L);
for (Alias alias : request.aliases()) {
if (Strings.hasLength(alias.filter())) {
aliasValidator.validateAliasFilter(alias.name(), alias.filter(), queryShardContext, xContentRegistry);
}
}
for (AliasMetaData aliasMetaData : templatesAliases.values()) {
if (aliasMetaData.filter() != null) {
aliasValidator.validateAliasFilter(aliasMetaData.alias(), aliasMetaData.filter().uncompressed(), queryShardContext, xContentRegistry);
}
}
// now, update the mappings with the actual source
Map<String, MappingMetaData> mappingsMetaData = new HashMap<>();
for (DocumentMapper mapper : mapperService.docMappers(true)) {
MappingMetaData mappingMd = new MappingMetaData(mapper);
mappingsMetaData.put(mapper.type(), mappingMd);
}
final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(request.index()).settings(actualIndexSettings).setRoutingNumShards(routingNumShards);
for (MappingMetaData mappingMd : mappingsMetaData.values()) {
indexMetaDataBuilder.putMapping(mappingMd);
}
for (AliasMetaData aliasMetaData : templatesAliases.values()) {
indexMetaDataBuilder.putAlias(aliasMetaData);
}
for (Alias alias : request.aliases()) {
AliasMetaData aliasMetaData = AliasMetaData.builder(alias.name()).filter(alias.filter()).indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build();
indexMetaDataBuilder.putAlias(aliasMetaData);
}
for (Map.Entry<String, Custom> customEntry : customs.entrySet()) {
indexMetaDataBuilder.putCustom(customEntry.getKey(), customEntry.getValue());
}
indexMetaDataBuilder.state(request.state());
final IndexMetaData indexMetaData;
try {
indexMetaData = indexMetaDataBuilder.build();
} catch (Exception e) {
removalExtraInfo = "failed to build index metadata";
throw e;
}
indexService.getIndexEventListener().beforeIndexAddedToCluster(indexMetaData.getIndex(), indexMetaData.getSettings());
MetaData newMetaData = MetaData.builder(currentState.metaData()).put(indexMetaData, false).build();
String maybeShadowIndicator = indexMetaData.isIndexUsingShadowReplicas() ? "s" : "";
logger.info("[{}] creating index, cause [{}], templates {}, shards [{}]/[{}{}], mappings {}", request.index(), request.cause(), templateNames, indexMetaData.getNumberOfShards(), indexMetaData.getNumberOfReplicas(), maybeShadowIndicator, mappings.keySet());
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
if (!request.blocks().isEmpty()) {
for (ClusterBlock block : request.blocks()) {
blocks.addIndexBlock(request.index(), block);
}
}
blocks.updateBlocks(indexMetaData);
ClusterState updatedState = ClusterState.builder(currentState).blocks(blocks).metaData(newMetaData).build();
if (request.state() == State.OPEN) {
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable()).addAsNew(updatedState.metaData().index(request.index()));
updatedState = allocationService.reroute(ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(), "index [" + request.index() + "] created");
}
removalExtraInfo = "cleaning up after validating index on master";
removalReason = IndexRemovalReason.NO_LONGER_ASSIGNED;
return updatedState;
} finally {
if (createdIndex != null) {
// Index was already partially created - need to clean up
indicesService.removeIndex(createdIndex, removalReason, removalExtraInfo);
}
}
}
@Override
public void onFailure(String source, Exception e) {
if (e instanceof ResourceAlreadyExistsException) {
logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
} else {
logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
}
super.onFailure(source, e);
}
});
}
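The settings precedence applied above (templates in reverse order, then the request settings last) can be illustrated in isolation; the template contents below are invented:

import org.elasticsearch.common.settings.Settings;

public class TemplatePrecedenceSketch {
    public static void main(String[] args) {
        Settings lowOrderTemplate = Settings.builder()
            .put("index.number_of_replicas", 2)
            .put("index.refresh_interval", "5s")
            .build();
        Settings highOrderTemplate = Settings.builder()
            .put("index.number_of_replicas", 1)
            .build();
        Settings requestSettings = Settings.builder()
            .put("index.refresh_interval", "1s")
            .build();
        Settings.Builder indexSettingsBuilder = Settings.builder();
        indexSettingsBuilder.put(lowOrderTemplate);   // worst-matching template first, may be overridden
        indexSettingsBuilder.put(highOrderTemplate);  // better-matching template wins
        indexSettingsBuilder.put(requestSettings);    // explicit request settings win over all templates
        Settings merged = indexSettingsBuilder.build();
        System.out.println(merged.get("index.number_of_replicas")); // 1, from the high-order template
        System.out.println(merged.get("index.refresh_interval"));   // 1s, from the request
    }
}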
Use of org.elasticsearch.index.mapper.MapperService in project elasticsearch by elastic.
The class MetaDataIndexUpgradeService, method checkMappingsCompatibility.
/**
* Checks the mappings for compatibility with the current version
*/
private void checkMappingsCompatibility(IndexMetaData indexMetaData) {
try {
// We cannot instantiate real analysis server at this point because the node might not have
// been started yet. However, we don't really need real analyzers at this stage - so we can fake it
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings);
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
final NamedAnalyzer fakeDefault = new NamedAnalyzer("fake_default", AnalyzerScope.INDEX, new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName) {
throw new UnsupportedOperationException("shouldn't be here");
}
});
// this is just a fake map that always returns the same value for any possible string key
// also the entrySet impl isn't fully correct but we implement it since internally
// IndexAnalyzers will iterate over all analyzers to close them.
final Map<String, NamedAnalyzer> analyzerMap = new AbstractMap<String, NamedAnalyzer>() {
@Override
public NamedAnalyzer get(Object key) {
assert key instanceof String : "key must be a string but was: " + key.getClass();
return new NamedAnalyzer((String) key, AnalyzerScope.INDEX, fakeDefault.analyzer());
}
@Override
public Set<Entry<String, NamedAnalyzer>> entrySet() {
return Collections.emptySet();
}
};
try (IndexAnalyzers fakeIndexAnalyzers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap, analyzerMap)) {
MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalyzers, xContentRegistry, similarityService, mapperRegistry, () -> null);
mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, false);
}
} catch (Exception ex) {
// Wrap the inner exception so we have the index name in the exception message
throw new IllegalStateException("unable to upgrade the mappings for the index [" + indexMetaData.getIndex() + "]", ex);
}
}
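The analyzer map above is a constant-value Map view: get(...) always returns the same fake analyzer and entrySet() is empty, so closing the IndexAnalyzers has nothing extra to iterate. A standalone sketch of the same trick (names invented):

import java.util.AbstractMap;
import java.util.Collections;
import java.util.Map;
import java.util.Set;

public class ConstantValueMapSketch {
    // a Map view that resolves every key to the same value
    static <V> Map<String, V> constantMap(V value) {
        return new AbstractMap<String, V>() {
            @Override
            public V get(Object key) {
                return value; // every key resolves to the same instance
            }
            @Override
            public Set<Entry<String, V>> entrySet() {
                return Collections.emptySet(); // nothing to iterate over when closing
            }
        };
    }

    public static void main(String[] args) {
        Map<String, String> analyzers = constantMap("fake_default");
        System.out.println(analyzers.get("my_custom_analyzer")); // prints fake_default
    }
}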