Use of org.elasticsearch.common.util.concurrent.ReleasableLock in project elasticsearch by elastic.
From the class InternalEngine, method acquireIndexCommit.
@Override
public IndexCommit acquireIndexCommit(final boolean flushFirst) throws EngineException {
    // we have to flush outside of the readlock, otherwise we might have a problem upgrading
    // the readlock to a write lock when we fail the engine in this operation
    if (flushFirst) {
        logger.trace("start flush for snapshot");
        flush(false, true);
        logger.trace("finish flush for snapshot");
    }
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        logger.trace("pulling snapshot");
        return deletionPolicy.snapshot();
    } catch (IOException e) {
        throw new SnapshotFailedEngineException(shardId, e);
    }
}
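For context, ReleasableLock is a thin wrapper around a java.util.concurrent.locks.Lock whose acquire() returns the lock itself as a Releasable, so try-with-resources guarantees the unlock even if the guarded block throws. A minimal sketch of the read/write lock pairing the engine relies on (class and field names here are hypothetical):

import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.elasticsearch.common.util.concurrent.ReleasableLock;

public class GuardedComponent {
    private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
    // readers share the lock; failing/closing the engine takes the write lock exclusively
    private final ReleasableLock readLock = new ReleasableLock(rwl.readLock());
    private final ReleasableLock writeLock = new ReleasableLock(rwl.writeLock());

    public void readGuarded(Runnable action) {
        // acquire() blocks until the lock is held; close() unlocks, even on throw
        try (ReleasableLock ignored = readLock.acquire()) {
            action.run();
        }
    }
}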
Use of org.elasticsearch.common.util.concurrent.ReleasableLock in project elasticsearch by elastic.
From the class InternalEngine, method index.
@Override
public IndexResult index(Index index) throws IOException {
    final boolean doThrottle = index.origin().isRecovery() == false;
    try (ReleasableLock releasableLock = readLock.acquire()) {
        ensureOpen();
        assert assertSequenceNumber(index.origin(), index.seqNo());
        assert assertVersionType(index);
        final Translog.Location location;
        long seqNo = index.seqNo();
        try (Releasable ignored = acquireLock(index.uid());
             Releasable indexThrottle = doThrottle ? throttle.acquireThrottle() : () -> {}) {
            lastWriteNanos = index.startTime();
            /* if we have an autoGeneratedID that comes into the engine we can potentially optimize
             * and just use addDocument instead of updateDocument and skip the entire version and index lookup across the board.
             * Yet, we have to deal with multiple document delivery; for this we use a property of the document that is added
             * to detect if it has potentially been added before. We use the document's timestamp for this since it's something
             * that:
             *  - doesn't change per document
             *  - is preserved in the transaction log
             *  - and is assigned before we start to index / replicate
             * NOTE: it's not important for this timestamp to be consistent across nodes etc.; it's just a number that is in the
             * common case increasing and can be used in the failure case, when we retry and resend documents, to establish a
             * happens-before relationship. For instance:
             *  - doc A has autoGeneratedIdTimestamp = 10, isRetry = false
             *  - doc B has autoGeneratedIdTimestamp = 9, isRetry = false
             *
             * while both docs are in flight, we disconnect on one node, reconnect and send doc A again
             *  - now doc A' has autoGeneratedIdTimestamp = 10, isRetry = true
             *
             * if A' arrives on the shard first we update maxUnsafeAutoIdTimestamp to 10 and use updateDocument. All subsequent
             * documents that arrive (A and B) will also use updateDocument since their timestamps are less than or equal to
             * maxUnsafeAutoIdTimestamp. While this is not strictly needed for doc B, it is just much simpler to implement since
             * it will only de-optimize some docs in the worst case.
             *
             * if A arrives on the shard first we use addDocument since maxUnsafeAutoIdTimestamp is < 10. A' will then either be
             * skipped or call updateDocument.
             */
            long currentVersion;
            final boolean deleted;
            // if anything is fishy here, i.e. there is a retry, we force updateDocument below so we are updating the document
            // in the lucene index without checking the version map, but we still do the version check
            final boolean forceUpdateDocument;
            final boolean canOptimizeAddDocument = canOptimizeAddDocument(index);
            if (canOptimizeAddDocument) {
                forceUpdateDocument = isForceUpdateDocument(index);
                currentVersion = Versions.NOT_FOUND;
                deleted = true;
            } else {
                // update the document
                // we don't force it - it depends on the version
                forceUpdateDocument = false;
                final VersionValue versionValue = versionMap.getUnderLock(index.uid());
                assert incrementVersionLookup();
                if (versionValue == null) {
                    currentVersion = loadCurrentVersionFromIndex(index.uid());
                    deleted = currentVersion == Versions.NOT_FOUND;
                } else {
                    currentVersion = checkDeletedAndGCed(versionValue);
                    deleted = versionValue.delete();
                }
            }
            final long expectedVersion = index.version();
            Optional<IndexResult> resultOnVersionConflict;
            try {
                final boolean isVersionConflict = checkVersionConflict(index, currentVersion, expectedVersion, deleted);
                resultOnVersionConflict = isVersionConflict
                    ? Optional.of(new IndexResult(currentVersion, index.seqNo(), false))
                    : Optional.empty();
            } catch (IllegalArgumentException | VersionConflictEngineException ex) {
                resultOnVersionConflict = Optional.of(new IndexResult(ex, currentVersion, index.seqNo()));
            }
            final IndexResult indexResult;
            if (resultOnVersionConflict.isPresent()) {
                indexResult = resultOnVersionConflict.get();
            } else {
                // no version conflict
                if (index.origin() == Operation.Origin.PRIMARY) {
                    seqNo = seqNoService().generateSeqNo();
                }
                indexResult = indexIntoLucene(index, seqNo, currentVersion, deleted, forceUpdateDocument,
                    canOptimizeAddDocument, expectedVersion);
            }
            if (indexResult.hasFailure() == false) {
                location = index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY
                    ? translog.add(new Translog.Index(index, indexResult))
                    : null;
                indexResult.setTranslogLocation(location);
            }
            indexResult.setTook(System.nanoTime() - index.startTime());
            indexResult.freeze();
            return indexResult;
        } finally {
            if (seqNo != SequenceNumbersService.UNASSIGNED_SEQ_NO) {
                seqNoService().markSeqNoAsCompleted(seqNo);
            }
        }
    } catch (RuntimeException | IOException e) {
        try {
            maybeFailEngine("index", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw e;
    }
}
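Note the conditional resource in the inner try: Releasable is a single-method interface, so a no-op lambda can stand in when throttling does not apply, and both branches of the ternary share one try-with-resources slot. A minimal sketch of that idiom, with hypothetical names:

import org.elasticsearch.common.lease.Releasable;

public class ThrottleIdiom {
    // stand-in for the engine's index throttle: acquireThrottle() blocks while
    // indexing is being throttled and returns a handle to release afterwards
    interface Throttle {
        Releasable acquireThrottle();
    }

    void writeThrottled(Throttle throttle, boolean doThrottle) {
        // the no-op lambda's close() does nothing, so the unthrottled path pays
        // nothing while the throttled path is released deterministically
        try (Releasable ignored = doThrottle ? throttle.acquireThrottle() : () -> {}) {
            // ... index the document while (possibly) holding the throttle ...
        }
    }
}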
Use of org.elasticsearch.common.util.concurrent.ReleasableLock in project elasticsearch by elastic.
From the class InternalEngine, method syncFlush.
@Override
public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId) throws EngineException {
    // best effort attempt before we acquire locks
    ensureOpen();
    if (indexWriter.hasUncommittedChanges()) {
        logger.trace("can't sync commit [{}]. have pending changes", syncId);
        return SyncedFlushResult.PENDING_OPERATIONS;
    }
    if (expectedCommitId.idsEqual(lastCommittedSegmentInfos.getId()) == false) {
        logger.trace("can't sync commit [{}]. current commit id is not equal to expected.", syncId);
        return SyncedFlushResult.COMMIT_MISMATCH;
    }
    try (ReleasableLock lock = writeLock.acquire()) {
        ensureOpen();
        ensureCanFlush();
        if (indexWriter.hasUncommittedChanges()) {
            logger.trace("can't sync commit [{}]. have pending changes", syncId);
            return SyncedFlushResult.PENDING_OPERATIONS;
        }
        if (expectedCommitId.idsEqual(lastCommittedSegmentInfos.getId()) == false) {
            logger.trace("can't sync commit [{}]. current commit id is not equal to expected.", syncId);
            return SyncedFlushResult.COMMIT_MISMATCH;
        }
        logger.trace("starting sync commit [{}]", syncId);
        commitIndexWriter(indexWriter, translog, syncId);
        logger.debug("successfully sync committed. sync id [{}].", syncId);
        lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
        return SyncedFlushResult.SUCCESS;
    } catch (IOException ex) {
        maybeFailEngine("sync commit", ex);
        throw new EngineException(shardId, "failed to sync commit", ex);
    }
}
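The method checks its preconditions twice on purpose: once lock-free as a cheap fast path, and again under the write lock because the state can change while waiting for the lock. A minimal sketch of this check/lock/re-check shape, with hypothetical helpers:

import org.elasticsearch.common.util.concurrent.ReleasableLock;

public abstract class DoubleCheckIdiom {
    protected ReleasableLock writeLock;

    protected abstract boolean hasPendingChanges();
    protected abstract void commit(String syncId);

    public boolean trySyncCommit(String syncId) {
        if (hasPendingChanges()) {          // optimistic, lock-free fast path
            return false;
        }
        try (ReleasableLock ignored = writeLock.acquire()) {
            if (hasPendingChanges()) {      // re-check: writes may have landed
                return false;               // while we waited for the lock
            }
            commit(syncId);
            return true;
        }
    }
}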
Use of org.elasticsearch.common.util.concurrent.ReleasableLock in project elasticsearch by elastic.
From the class ShadowEngine, method flush.
@Override
public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException {
    logger.trace("skipping FLUSH on shadow engine");
    // reread the last committed segment infos
    refresh("flush");
    /*
     * we have to inc-ref the store here since if the engine is closed by a tragic event,
     * it is closed without acquiring the write lock and waiting for exclusive access. That
     * close may dec the store reference, which can essentially close the store, and unless
     * we inc the reference ourselves we can't safely use it.
     */
    store.incRef();
    try (ReleasableLock lock = readLock.acquire()) {
        // reread the last committed segment infos
        lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store);
    } catch (Exception e) {
        if (isClosed.get() == false) {
            logger.warn("failed to read latest segment infos on flush", e);
            if (Lucene.isCorruptionException(e)) {
                throw new FlushFailedEngineException(shardId, e);
            }
        }
    } finally {
        store.decRef();
    }
    return new CommitId(lastCommittedSegmentInfos.getId());
}
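The store here is reference-counted: incRef() before the guarded region and decRef() in a finally block keep the store open even if a tragic-event close runs concurrently. A minimal sketch of that pattern, assuming Elasticsearch's AbstractRefCounted base class from this era of the codebase (the resource itself is hypothetical):

import org.elasticsearch.common.util.concurrent.AbstractRefCounted;

public class RefCountedResource extends AbstractRefCounted {
    public RefCountedResource() {
        super("resource"); // name reported when a stale ref is acquired
    }

    @Override
    protected void closeInternal() {
        // runs only when the last reference is released
    }

    public void useSafely(Runnable action) {
        incRef();            // throws if the resource is already closed
        try {
            action.run();    // the resource cannot close out from under us here
        } finally {
            decRef();        // the last release triggers closeInternal()
        }
    }
}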
Use of org.elasticsearch.common.util.concurrent.ReleasableLock in project elasticsearch by elastic.
From the class ShadowEngine, method refresh.
@Override
public void refresh(String source) throws EngineException {
    // we obtain a read lock here, since we don't want a flush to happen while we are refreshing
    // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        searcherManager.maybeRefreshBlocking();
    } catch (AlreadyClosedException e) {
        throw e;
    } catch (Exception e) {
        try {
            failEngine("refresh failed", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw new RefreshFailedEngineException(shardId, e);
    }
}
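The catch block shows the suppression idiom used throughout these engines: failing the engine is itself allowed to fail, and that secondary exception is attached to the original rather than replacing it, so the root cause survives in the stack trace. A minimal standalone sketch (helper names are hypothetical):

public class SuppressionIdiom {
    void refreshOrFail() {
        try {
            doRefresh();
        } catch (Exception e) {
            try {
                failEngine("refresh failed", e);   // the cleanup path may itself throw
            } catch (Exception inner) {
                e.addSuppressed(inner);            // keep e as the primary failure
            }
            throw new RuntimeException("refresh failed", e);
        }
    }

    private void doRefresh() { /* ... */ }

    private void failEngine(String reason, Exception cause) { /* ... */ }
}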