Use of org.elasticsearch.index.mapper.MapperService in project crate by crate.
From the class IndexShardTestCase, the method newShard:
/**
 * Creates a new initializing shard.
 * @param routing                shard routing to use
 * @param shardPath              path to use for shard data
 * @param indexMetadata          indexMetadata for the shard, including any mapping
 * @param storeProvider          an optional custom store provider to use. If null a default file based store will be created
 * @param engineFactory          an optional engine factory to use. If null the default engine factory will be used
 * @param globalCheckpointSyncer callback for syncing global checkpoints
 * @param retentionLeaseSyncer   callback for syncing retention leases
 * @param indexEventListener     index event listener
 * @param listeners              an optional set of listeners to add to the shard
 */
protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMetadata indexMetadata,
                              @Nullable CheckedFunction<IndexSettings, Store, IOException> storeProvider,
                              @Nullable EngineFactory engineFactory, Runnable globalCheckpointSyncer,
                              RetentionLeaseSyncer retentionLeaseSyncer, IndexEventListener indexEventListener,
                              IndexingOperationListener... listeners) throws IOException {
    final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build();
    final IndexSettings indexSettings = new IndexSettings(indexMetadata, nodeSettings);
    final IndexShard indexShard;
    if (storeProvider == null) {
        storeProvider = is -> createStore(is, shardPath);
    }
    final Store store = storeProvider.apply(indexSettings);
    boolean success = false;
    try {
        IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings));
        MapperService mapperService = MapperTestUtils.newMapperService(
            xContentRegistry(), createTempDir(), indexSettings.getSettings(), "index");
        mapperService.merge(indexMetadata, MapperService.MergeReason.MAPPING_RECOVERY);
        ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        CircuitBreakerService breakerService = new HierarchyCircuitBreakerService(nodeSettings, clusterSettings);
        indexShard = new IndexShard(routing, indexSettings, shardPath, store, indexCache, mapperService,
                                    engineFactory, indexEventListener, threadPool, BigArrays.NON_RECYCLING_INSTANCE,
                                    Arrays.asList(listeners), globalCheckpointSyncer, retentionLeaseSyncer,
                                    breakerService);
        indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER);
        success = true;
    } finally {
        if (success == false) {
            IOUtils.close(store);
        }
    }
    return indexShard;
}
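For orientation, a minimal sketch of how a test might invoke this overload. The routing, shardPath, and indexMetadata fixtures are assumed to come from the surrounding test, and RetentionLeaseSyncer.EMPTY is assumed to be available as a no-op syncer in this codebase; substitute your own stub if it is not.

// Hypothetical call site (fixtures assumed): the nulls select the default
// file-based store and the default engine factory, as documented above.
IndexShard shard = newShard(
    routing,                      // an initializing ShardRouting from the test fixture
    shardPath,                    // on-disk location for the shard's data
    indexMetadata,                // index settings plus mappings
    null,                         // storeProvider: fall back to the default file-based store
    null,                         // engineFactory: fall back to the default engine factory
    () -> {},                     // globalCheckpointSyncer: no-op for a single-shard test
    RetentionLeaseSyncer.EMPTY,   // assumed no-op retention lease syncer
    new IndexEventListener() {}   // no-op index event listener
);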
Use of org.elasticsearch.index.mapper.MapperService in project crate by crate.
From the class EngineTestCase, the method createMapperService:
public static MapperService createMapperService(String type) throws IOException {
    IndexMetadata indexMetadata = IndexMetadata.builder("test")
        .settings(Settings.builder()
            .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1))
        .putMapping(type, "{\"properties\": {}}")
        .build();
    MapperService mapperService = MapperTestUtils.newMapperService(
        new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), createTempDir(), Settings.EMPTY, "test");
    mapperService.merge(indexMetadata, MapperService.MergeReason.MAPPING_UPDATE);
    return mapperService;
}
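The engine tests below use this helper as a shared fixture. A condensed sketch of the recurring pattern, using the same test utilities (readAllOperationsInLucene and assertConsistentHistoryBetweenTranslogAndLuceneIndex) that appear throughout this page:

// Typical pattern from the tests below: build the mapper service once,
// then use it to decode the operation history stored in Lucene.
MapperService mapperService = createMapperService("test");
List<Translog.Operation> ops = readAllOperationsInLucene(engine, mapperService);
assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService);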
Use of org.elasticsearch.index.mapper.MapperService in project crate by crate.
From the class InternalEngineTests, the method testNoOps:
/*
 * This test verifies that a no-op does not generate a new sequence number, that no-ops can advance the
 * local checkpoint, and that no-ops are correctly added to the translog.
 */
@Test
public void testNoOps() throws IOException {
    engine.close();
    InternalEngine noOpEngine = null;
    final int maxSeqNo = randomIntBetween(0, 128);
    final int localCheckpoint = randomIntBetween(0, maxSeqNo);
    try {
        final BiFunction<Long, Long, LocalCheckpointTracker> supplier =
            (ms, lcp) -> new LocalCheckpointTracker(maxSeqNo, localCheckpoint);
        EngineConfig noopEngineConfig = copy(engine.config(), new SoftDeletesRetentionMergePolicy(
            Lucene.SOFT_DELETES_FIELD, () -> new MatchAllDocsQuery(), engine.config().getMergePolicy()));
        noOpEngine = new InternalEngine(noopEngineConfig, supplier) {

            @Override
            protected long doGenerateSeqNoForOperation(Operation operation) {
                throw new UnsupportedOperationException();
            }
        };
        noOpEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
        final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm.get());
        final String reason = "filling gaps";
        noOpEngine.noOp(new Engine.NoOp(maxSeqNo + 1, primaryTerm.get(), LOCAL_TRANSLOG_RECOVERY, System.nanoTime(), reason));
        assertThat(noOpEngine.getProcessedLocalCheckpoint(), equalTo((long) (maxSeqNo + 1)));
        assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled));
        noOpEngine.noOp(new Engine.NoOp(maxSeqNo + 2, primaryTerm.get(), randomFrom(PRIMARY, REPLICA, PEER_RECOVERY), System.nanoTime(), reason));
        assertThat(noOpEngine.getProcessedLocalCheckpoint(), equalTo((long) (maxSeqNo + 2)));
        assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled + 1));
        // walk to the last op that we added to the translog
        Translog.Operation op;
        Translog.Operation last = null;
        try (Translog.Snapshot snapshot = noOpEngine.getTranslog().newSnapshot()) {
            while ((op = snapshot.next()) != null) {
                last = op;
            }
        }
        assertNotNull(last);
        assertThat(last, instanceOf(Translog.NoOp.class));
        final Translog.NoOp noOp = (Translog.NoOp) last;
        assertThat(noOp.seqNo(), equalTo((long) (maxSeqNo + 2)));
        assertThat(noOp.primaryTerm(), equalTo(primaryTerm.get()));
        assertThat(noOp.reason(), equalTo(reason));
        if (engine.engineConfig.getIndexSettings().isSoftDeleteEnabled()) {
            MapperService mapperService = createMapperService("test");
            List<Translog.Operation> operationsFromLucene = readAllOperationsInLucene(noOpEngine, mapperService);
            // the filled gaps plus the two manual no-ops
            assertThat(operationsFromLucene, hasSize(maxSeqNo + 2 - localCheckpoint));
            for (int i = 0; i < operationsFromLucene.size(); i++) {
                assertThat(operationsFromLucene.get(i),
                           equalTo(new Translog.NoOp(localCheckpoint + 1 + i, primaryTerm.get(), "filling gaps")));
            }
            assertConsistentHistoryBetweenTranslogAndLuceneIndex(noOpEngine, mapperService);
        }
    } finally {
        IOUtils.close(noOpEngine);
    }
}
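The hasSize assertion in this test is pure counting. A worked instance with hypothetical concrete values (the test randomizes maxSeqNo and localCheckpoint):

// fillSeqNoGaps writes one no-op per seq# in (localCheckpoint, maxSeqNo],
// then the test appends two manual no-ops at maxSeqNo + 1 and maxSeqNo + 2.
int maxSeqNo = 10;                              // hypothetical value
int localCheckpoint = 4;                        // hypothetical value
int gapsFilled = maxSeqNo - localCheckpoint;    // 6 no-ops, for seq#s 5..10
int manualNoOps = 2;                            // seq#s 11 and 12
assert gapsFilled + manualNoOps == maxSeqNo + 2 - localCheckpoint;  // 8 == 8, the hasSize() bound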
Use of org.elasticsearch.index.mapper.MapperService in project crate by crate.
From the class InternalEngineTests, the method assertOperationHistoryInLucene:
private void assertOperationHistoryInLucene(List<Engine.Operation> operations) throws IOException {
    final MergePolicy keepSoftDeleteDocsMP = new SoftDeletesRetentionMergePolicy(
        Lucene.SOFT_DELETES_FIELD, MatchAllDocsQuery::new, engine.config().getMergePolicy());
    Settings.Builder settings = Settings.builder()
        .put(defaultSettings.getSettings())
        .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
        .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), randomLongBetween(0, 10));
    final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build();
    final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata);
    Set<Long> expectedSeqNos = new HashSet<>();
    try (Store store = createStore();
         Engine engine = createEngine(config(indexSettings, store, createTempDir(), keepSoftDeleteDocsMP, null))) {
        for (Engine.Operation op : operations) {
            if (op instanceof Engine.Index) {
                Engine.IndexResult indexResult = engine.index((Engine.Index) op);
                assertThat(indexResult.getFailure(), nullValue());
                expectedSeqNos.add(indexResult.getSeqNo());
            } else {
                Engine.DeleteResult deleteResult = engine.delete((Engine.Delete) op);
                assertThat(deleteResult.getFailure(), nullValue());
                expectedSeqNos.add(deleteResult.getSeqNo());
            }
            if (rarely()) {
                engine.refresh("test");
            }
            if (rarely()) {
                engine.flush();
            }
            if (rarely()) {
                engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID());
            }
        }
        MapperService mapperService = createMapperService("test");
        List<Translog.Operation> actualOps = readAllOperationsInLucene(engine, mapperService);
        assertThat(actualOps.stream().map(o -> o.seqNo()).collect(Collectors.toList()),
                   containsInAnyOrder(expectedSeqNos.toArray()));
        assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService);
    }
}
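A hedged sketch of a possible caller, reusing fixtures that appear elsewhere on this page (testParsedDocument, indexForDoc, newUid, primaryTerm, B_1); the five-document count is arbitrary:

// Index a handful of documents, delete one, then verify the whole
// history can be read back from Lucene.
List<Engine.Operation> operations = new ArrayList<>();
for (int i = 0; i < 5; i++) {
    ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, false);
    operations.add(indexForDoc(doc));
}
operations.add(new Engine.Delete("0", newUid("0"), primaryTerm.get()));  // mirrors the Delete usage below
assertOperationHistoryInLucene(operations);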
Use of org.elasticsearch.index.mapper.MapperService in project crate by crate.
From the class InternalEngineTests, the method testForceMergeWithSoftDeletesRetentionAndRecoverySource:
@Test
public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exception {
    final long retainedExtraOps = randomLongBetween(0, 10);
    Settings.Builder settings = Settings.builder()
        .put(defaultSettings.getSettings())
        .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
        .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), retainedExtraOps);
    final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build();
    final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata);
    final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
    final MapperService mapperService = createMapperService("test");
    final boolean omitSourceAllTheTime = randomBoolean();
    final Set<String> liveDocs = new HashSet<>();
    final Set<String> liveDocsWithSource = new HashSet<>();
    try (Store store = createStore();
         InternalEngine engine = createEngine(
             config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get))) {
        int numDocs = scaledRandomIntBetween(10, 100);
        for (int i = 0; i < numDocs; i++) {
            boolean useRecoverySource = randomBoolean() || omitSourceAllTheTime;
            ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, useRecoverySource);
            engine.index(indexForDoc(doc));
            liveDocs.add(doc.id());
            if (useRecoverySource == false) {
                liveDocsWithSource.add(Integer.toString(i));
            }
        }
        for (int i = 0; i < numDocs; i++) {
            boolean useRecoverySource = randomBoolean() || omitSourceAllTheTime;
            ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, useRecoverySource);
            if (randomBoolean()) {
                engine.delete(new Engine.Delete(doc.id(), newUid(doc.id()), primaryTerm.get()));
                liveDocs.remove(doc.id());
                liveDocsWithSource.remove(doc.id());
            }
            if (randomBoolean()) {
                engine.index(indexForDoc(doc));
                liveDocs.add(doc.id());
                if (useRecoverySource == false) {
                    liveDocsWithSource.add(doc.id());
                } else {
                    liveDocsWithSource.remove(doc.id());
                }
            }
            if (randomBoolean()) {
                engine.flush(randomBoolean(), true);
            }
        }
        engine.flush();
        globalCheckpoint.set(randomLongBetween(0, engine.getPersistedLocalCheckpoint()));
        engine.syncTranslog();
        final long minSeqNoToRetain;
        try (Engine.IndexCommitRef safeCommit = engine.acquireSafeIndexCommit()) {
            long safeCommitLocalCheckpoint = Long.parseLong(
                safeCommit.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
            minSeqNoToRetain = Math.min(globalCheckpoint.get() + 1 - retainedExtraOps, safeCommitLocalCheckpoint + 1);
        }
        engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID());
        assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService);
        Map<Long, Translog.Operation> ops = readAllOperationsInLucene(engine, mapperService)
            .stream().collect(Collectors.toMap(Translog.Operation::seqNo, Function.identity()));
        for (long seqno = 0; seqno <= engine.getPersistedLocalCheckpoint(); seqno++) {
            String msg = "seq# [" + seqno + "], global checkpoint [" + globalCheckpoint + "], retained-ops [" + retainedExtraOps + "]";
            if (seqno < minSeqNoToRetain) {
                Translog.Operation op = ops.get(seqno);
                if (op != null) {
                    assertThat(op, instanceOf(Translog.Index.class));
                    assertThat(msg, ((Translog.Index) op).id(), isIn(liveDocs));
                }
            } else {
                Translog.Operation op = ops.get(seqno);
                assertThat(msg, op, notNullValue());
                if (op instanceof Translog.Index) {
                    assertEquals(msg, ((Translog.Index) op).source(), B_1);
                }
            }
        }
        settings.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0);
        indexSettings.updateIndexMetadata(IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build());
        engine.onSettingsChanged(indexSettings.getTranslogRetentionAge(), indexSettings.getTranslogRetentionSize(),
                                 indexSettings.getSoftDeleteRetentionOperations());
        // If we already merged down to 1 segment, the next force-merge will be a no-op. We need to add an extra
        // segment so that merges happen and we can verify that _recovery_source is pruned.
        // See: https://github.com/elastic/elasticsearch/issues/41628
        final int numSegments;
        try (Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL)) {
            numSegments = searcher.getDirectoryReader().leaves().size();
        }
        if (numSegments == 1) {
            boolean useRecoverySource = randomBoolean() || omitSourceAllTheTime;
            ParsedDocument doc = testParsedDocument("dummy", null, testDocument(), B_1, null, useRecoverySource);
            engine.index(indexForDoc(doc));
            if (useRecoverySource == false) {
                liveDocsWithSource.add(doc.id());
            }
            engine.syncTranslog();
            globalCheckpoint.set(engine.getPersistedLocalCheckpoint());
            engine.flush(randomBoolean(), true);
        } else {
            globalCheckpoint.set(engine.getPersistedLocalCheckpoint());
            engine.syncTranslog();
        }
        engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID());
        assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService);
        assertThat(readAllOperationsInLucene(engine, mapperService), hasSize(liveDocsWithSource.size()));
    }
}
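The minSeqNoToRetain computation above is the heart of this test: an operation may only be merged away if its seq# falls below both the global-checkpoint-based floor and the seq# just past the safe commit's local checkpoint. A worked instance with hypothetical numbers:

// Hypothetical values illustrating minSeqNoToRetain from the test above.
long globalCheckpoint = 50;
long retainedExtraOps = 10;
long safeCommitLocalCheckpoint = 45;
long minSeqNoToRetain = Math.min(globalCheckpoint + 1 - retainedExtraOps,   // 41: checkpoint-based floor
                                 safeCommitLocalCheckpoint + 1);            // 46: safe-commit bound
assert minSeqNoToRetain == 41;  // every op with seq# >= 41 must survive the force-merge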