Example usage of org.elasticsearch.index.mapper.Uid from the elastic/elasticsearch project: class IndexShardTestCase, method getShardDocUIDs.
/**
 * Collects the UIDs of all live (non-deleted) documents currently visible on the shard.
 * Forces a refresh first so recently indexed documents are included.
 *
 * @param shard the shard to read from
 * @return the set of UIDs of all live documents on the shard
 * @throws IOException if reading stored fields from the index fails
 */
protected Set<Uid> getShardDocUIDs(final IndexShard shard) throws IOException {
shard.refresh("get_uids");
try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
final Set<Uid> uids = new HashSet<>();
for (LeafReaderContext leafContext : searcher.reader().leaves()) {
final LeafReader leaf = leafContext.reader();
final Bits liveDocs = leaf.getLiveDocs();
final int maxDoc = leaf.maxDoc();
for (int docId = 0; docId < maxDoc; docId++) {
// A null liveDocs means the segment has no deletions; otherwise skip deleted docs.
if (liveDocs != null && liveDocs.get(docId) == false) {
continue;
}
// Only load the single stored field we need (_uid) rather than the whole document.
final Document doc = leaf.document(docId, Collections.singleton(UidFieldMapper.NAME));
uids.add(Uid.createUid(doc.get(UidFieldMapper.NAME)));
}
}
return uids;
}
}
Example usage of org.elasticsearch.index.mapper.Uid from the crate/crate project: class InternalEngineTests, method testRebuildLocalCheckpointTrackerAndVersionMap.
// Verifies that, after reopening an engine from its last safe commit WITHOUT replaying the
// translog, the engine rebuilds its local checkpoint tracker and live version map from the
// Lucene index (soft-deletes history), and that a subsequent translog recovery converges to
// the same set of documents as before the restart.
@Test
public void testRebuildLocalCheckpointTrackerAndVersionMap() throws Exception {
// Soft-deletes must be enabled with a generous retention so the history needed to rebuild
// the version map is still present in the index after reopening.
Settings.Builder settings = Settings.builder().put(defaultSettings.getSettings()).put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10000).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true);
final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build();
final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata);
final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
Path translogPath = createTempDir();
// Random replica-side history; the empty first entry represents the initial (empty) commit.
List<Engine.Operation> operations = generateHistoryOnReplica(between(1, 500), randomBoolean(), randomBoolean());
List<List<Engine.Operation>> commits = new ArrayList<>();
commits.add(new ArrayList<>());
try (Store store = createStore()) {
EngineConfig config = config(indexSettings, store, translogPath, NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get);
final List<DocIdSeqNoAndSource> docs;
try (InternalEngine engine = createEngine(config)) {
// Phase 1: apply the history, randomly interleaving translog syncs, global-checkpoint
// advances, refreshes, and flushes. Each flush snapshots the operations it contains.
List<Engine.Operation> flushedOperations = new ArrayList<>();
for (Engine.Operation op : operations) {
flushedOperations.add(op);
applyOperation(engine, op);
if (randomBoolean()) {
engine.syncTranslog();
// The global checkpoint may only advance up to the persisted local checkpoint.
globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint()));
}
if (randomInt(100) < 10) {
engine.refresh("test");
}
if (randomInt(100) < 5) {
engine.flush(true, true);
flushedOperations.sort(Comparator.comparing(Engine.Operation::seqNo));
commits.add(new ArrayList<>(flushedOperations));
}
}
// Capture the final doc set before closing; the reopened engine must reproduce it.
docs = getDocIds(engine, true);
}
// The safe commit is the most recent commit whose operations are all at or below the
// global checkpoint; the reopened engine starts from it.
List<Engine.Operation> operationsInSafeCommit = null;
for (int i = commits.size() - 1; i >= 0; i--) {
if (commits.get(i).stream().allMatch(op -> op.seqNo() <= globalCheckpoint.get())) {
operationsInSafeCommit = commits.get(i);
break;
}
}
assertThat(operationsInSafeCommit, notNullValue());
try (InternalEngine engine = new InternalEngine(config)) {
// do not recover from translog
// Phase 2: before translog recovery, the rebuilt version map should contain exactly the
// deletes from the safe commit above the (restored) persisted local checkpoint.
final Map<BytesRef, Engine.Operation> deletesAfterCheckpoint = new HashMap<>();
for (Engine.Operation op : operationsInSafeCommit) {
if (op instanceof Engine.NoOp == false && op.seqNo() > engine.getPersistedLocalCheckpoint()) {
deletesAfterCheckpoint.put(new Term(IdFieldMapper.NAME, Uid.encodeId(op.id())).bytes(), op);
}
}
deletesAfterCheckpoint.values().removeIf(o -> o instanceof Engine.Delete == false);
final Map<BytesRef, VersionValue> versionMap = engine.getVersionMap();
for (BytesRef uid : deletesAfterCheckpoint.keySet()) {
final VersionValue versionValue = versionMap.get(uid);
final Engine.Operation op = deletesAfterCheckpoint.get(uid);
final String msg = versionValue + " vs " + "op[" + op.operationType() + "id=" + op.id() + " seqno=" + op.seqNo() + " term=" + op.primaryTerm() + "]";
assertThat(versionValue, instanceOf(DeleteVersionValue.class));
assertThat(msg, versionValue.seqNo, equalTo(op.seqNo()));
assertThat(msg, versionValue.term, equalTo(op.primaryTerm()));
assertThat(msg, versionValue.version, equalTo(op.version()));
}
assertThat(versionMap.keySet(), equalTo(deletesAfterCheckpoint.keySet()));
// The rebuilt checkpoint tracker must mark exactly the seq_nos of the safe commit as processed.
final LocalCheckpointTracker tracker = engine.getLocalCheckpointTracker();
final Set<Long> seqNosInSafeCommit = operationsInSafeCommit.stream().map(op -> op.seqNo()).collect(Collectors.toSet());
for (Engine.Operation op : operations) {
assertThat("seq_no=" + op.seqNo() + " max_seq_no=" + tracker.getMaxSeqNo() + " checkpoint=" + tracker.getProcessedCheckpoint(), tracker.hasProcessed(op.seqNo()), equalTo(seqNosInSafeCommit.contains(op.seqNo())));
}
// Phase 3: after replaying the translog, the doc set must match the pre-restart snapshot.
engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
assertThat(getDocIds(engine, true), equalTo(docs));
}
}
}
Example usage of org.elasticsearch.index.mapper.Uid from the elastic/elasticsearch project: class TranslogRecoveryPerformer, method performRecoveryOperation.
/**
 * Replays a single translog operation (INDEX, DELETE, or NO_OP) against the given engine
 * and then increments the processed-operations counter.
 *
 * @param engine              the engine to apply the replayed operation to
 * @param operation           the translog operation to replay
 * @param allowMappingUpdates true if mapping update should be accepted (but collected). Setting it to false will
 *                            cause a {@link MapperException} to be thrown if an update
 *                            is encountered.
 * @param origin              the origin recorded on the resulting engine operation
 * @throws IOException if applying the operation to the engine fails
 */
private void performRecoveryOperation(Engine engine, Translog.Operation operation, boolean allowMappingUpdates, Engine.Operation.Origin origin) throws IOException {
try {
switch(operation.opType()) {
case INDEX:
Translog.Index index = (Translog.Index) operation;
// we set canHaveDuplicates to true all the time such that we de-optimze the translog case and ensure that all
// autoGeneratedID docs that are coming from the primary are updated correctly.
Engine.Index engineIndex = IndexShard.prepareIndex(docMapper(index.type()), source(shardId.getIndexName(), index.type(), index.id(), index.source(), XContentFactory.xContentType(index.source())).routing(index.routing()).parent(index.parent()), index.seqNo(), index.primaryTerm(), index.version(), index.versionType().versionTypeForReplicationAndRecovery(), origin, index.getAutoGeneratedIdTimestamp(), true);
// Collect (or reject, per allowMappingUpdates) any dynamic mapping update the parsed doc produced.
maybeAddMappingUpdate(engineIndex.type(), engineIndex.parsedDoc().dynamicMappingsUpdate(), engineIndex.id(), allowMappingUpdates);
logger.trace("[translog] recover [index] op [({}, {})] of [{}][{}]", index.seqNo(), index.primaryTerm(), index.type(), index.id());
index(engine, engineIndex);
break;
case DELETE:
Translog.Delete delete = (Translog.Delete) operation;
// The translog stores the raw _uid term; split it back into type and id.
Uid uid = Uid.createUid(delete.uid().text());
logger.trace("[translog] recover [delete] op [({}, {})] of [{}][{}]", delete.seqNo(), delete.primaryTerm(), uid.type(), uid.id());
final Engine.Delete engineDelete = new Engine.Delete(uid.type(), uid.id(), delete.uid(), delete.seqNo(), delete.primaryTerm(), delete.version(), delete.versionType().versionTypeForReplicationAndRecovery(), origin, System.nanoTime());
delete(engine, engineDelete);
break;
case NO_OP:
// A no-op only advances the sequence-number state; it carries no document.
final Translog.NoOp noOp = (Translog.NoOp) operation;
final long seqNo = noOp.seqNo();
final long primaryTerm = noOp.primaryTerm();
final String reason = noOp.reason();
logger.trace("[translog] recover [no_op] op [({}, {})] of [{}]", seqNo, primaryTerm, reason);
final Engine.NoOp engineNoOp = new Engine.NoOp(null, seqNo, primaryTerm, 0, VersionType.INTERNAL, origin, System.nanoTime(), reason);
noOp(engine, engineNoOp);
break;
default:
throw new IllegalStateException("No operation defined for [" + operation + "]");
}
} catch (ElasticsearchException e) {
// Walk the cause chain: failures marked IgnoreOnRecoveryEngineException (possibly wrapped)
// are expected during recovery and must not abort it; anything else is rethrown.
boolean hasIgnoreOnRecoveryException = false;
ElasticsearchException current = e;
while (true) {
if (current instanceof IgnoreOnRecoveryEngineException) {
hasIgnoreOnRecoveryException = true;
break;
}
if (current.getCause() instanceof ElasticsearchException) {
current = (ElasticsearchException) current.getCause();
} else {
break;
}
}
if (!hasIgnoreOnRecoveryException) {
throw e;
}
}
// Counted as processed even when the failure was deliberately ignored above.
operationProcessed();
}
Example usage of org.elasticsearch.index.mapper.Uid from the elastic/elasticsearch project: class IndexShardTests, method testRestoreShard.
// Verifies that restoring a shard from a (fake) snapshot repository replaces the target
// shard's existing content with the source shard's flushed content: doc "1" on the target
// disappears and doc "0" from the source appears after the restore.
public void testRestoreShard() throws IOException {
final IndexShard source = newStartedShard(true);
IndexShard target = newStartedShard(true);
indexDoc(source, "test", "0");
if (randomBoolean()) {
source.refresh("test");
}
// Give the target its own, different document so the restore is observable.
indexDoc(target, "test", "1");
target.refresh("test");
assertDocs(target, new Uid("test", "1"));
// only flush source
flushShard(source);
// Re-init the target as a primary recovering from a snapshot restore source.
final ShardRouting origRouting = target.routingEntry();
ShardRouting routing = ShardRoutingHelper.reinitPrimary(origRouting);
final Snapshot snapshot = new Snapshot("foo", new SnapshotId("bar", UUIDs.randomBase64UUID()));
routing = ShardRoutingHelper.newWithRestoreSource(routing, new RecoverySource.SnapshotRecoverySource(snapshot, Version.CURRENT, "test"));
target = reinitShard(target, routing);
Store sourceStore = source.store();
Store targetStore = target.store();
DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
target.markAsRecovering("store", new RecoveryState(routing, localNode, null));
// Fake repository: "restores" by wiping the target directory and copying the source shard's
// Lucene files over directly (skipping the lock file and any extra test artifacts).
assertTrue(target.restoreFromRepository(new RestoreOnlyRepository("test") {

@Override
public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) {
try {
cleanLuceneIndex(targetStore.directory());
for (String file : sourceStore.directory().listAll()) {
if (file.equals("write.lock") || file.startsWith("extra")) {
continue;
}
targetStore.directory().copyFrom(sourceStore.directory(), file, file, IOContext.DEFAULT);
}
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
}));
target.updateRoutingEntry(routing.moveToStarted());
// After restore the target must contain exactly the source's flushed document.
assertDocs(target, new Uid("test", "0"));
closeShards(source, target);
}
Example usage of org.elasticsearch.index.mapper.Uid from the elastic/elasticsearch project: class Engine, method getFromSearcher.
/**
 * Performs a real-time-ish get by resolving the document's docId/version through a searcher
 * obtained from {@code searcherFactory}.
 *
 * Ownership of the searcher: it is released here on every path except a successful hit,
 * in which case it is handed to the returned {@link GetResult} and the caller must release it.
 *
 * @param get             the get request (uid, expected version, version type)
 * @param searcherFactory factory used to acquire a searcher scoped to this get
 * @return a {@link GetResult} wrapping the live searcher, or {@link GetResult#NOT_EXISTS}
 * @throws EngineException          if the docId/version lookup fails
 * @throws VersionConflictEngineException if the found version conflicts with the requested one
 */
protected final GetResult getFromSearcher(Get get, Function<String, Searcher> searcherFactory) throws EngineException {
final Searcher searcher = searcherFactory.apply("get");
final Versions.DocIdAndVersion docIdAndVersion;
try {
docIdAndVersion = Versions.loadDocIdAndVersion(searcher.reader(), get.uid());
} catch (Exception e) {
Releasables.closeWhileHandlingException(searcher);
//TODO: A better exception goes here
throw new EngineException(shardId, "Couldn't resolve version", e);
}
// Guard clause: no document for this uid.
if (docIdAndVersion == null) {
Releasables.close(searcher);
return GetResult.NOT_EXISTS;
}
// Guard clause: version conflict against the requested read version.
if (get.versionType().isVersionConflictForReads(docIdAndVersion.version, get.version())) {
Releasables.close(searcher);
Uid uid = Uid.createUid(get.uid().text());
throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), get.versionType().explainConflictForReads(docIdAndVersion.version, get.version()));
}
// responsibility of the caller to call GetResult.release
return new GetResult(searcher, docIdAndVersion);
}
Aggregations