Use of org.opensearch.common.util.concurrent.ReleasableLock in project OpenSearch by opensearch-project.
Class Translog, method getMaxSeqNo.
/**
 * Returns the max seq_no of translog operations found in this translog. Since this value is calculated based on the currently
 * existing readers, it is not necessarily the max seq_no of all operations that have ever been stored in this translog.
 */
public long getMaxSeqNo() {
    try (ReleasableLock ignored = readLock.acquire()) {
        ensureOpen();
        final OptionalLong maxSeqNo = Stream.concat(readers.stream(), Stream.of(current))
            .mapToLong(reader -> reader.getCheckpoint().maxSeqNo)
            .max();
        assert maxSeqNo.isPresent() : "must have at least one translog generation";
        return maxSeqNo.getAsLong();
    }
}
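The key pattern here is that ReleasableLock lets a plain java.util.concurrent lock participate in try-with-resources: acquire() takes the lock and returns a closeable handle whose close() releases it. Below is a minimal, self-contained sketch of that pattern. The CloseableLock wrapper and the readerMaxSeqNos array are illustrative stand-ins, not the OpenSearch implementation.

import java.util.OptionalLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.LongStream;

// Illustrative stand-in for ReleasableLock: acquire() locks and returns an
// AutoCloseable whose close() unlocks, enabling try-with-resources.
final class CloseableLock implements AutoCloseable {
    private final Lock lock;
    CloseableLock(Lock lock) { this.lock = lock; }
    CloseableLock acquire() { lock.lock(); return this; }
    @Override public void close() { lock.unlock(); }
}

public class MaxSeqNoExample {
    private static final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
    private static final CloseableLock readLock = new CloseableLock(rwl.readLock());
    // Hypothetical per-generation max seq_no values standing in for the reader checkpoints.
    private static final long[] readerMaxSeqNos = { 41L, 97L, 63L };

    static long getMaxSeqNo() {
        try (CloseableLock ignored = readLock.acquire()) {
            OptionalLong max = LongStream.of(readerMaxSeqNos).max();
            assert max.isPresent() : "must have at least one translog generation";
            return max.getAsLong();
        } // read lock released here by close()
    }

    public static void main(String[] args) {
        System.out.println(getMaxSeqNo()); // 97
    }
}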
Use of org.opensearch.common.util.concurrent.ReleasableLock in project OpenSearch by opensearch-project.
Class Translog, method recoverFromFiles.
/**
 * Recovers all translog files found on disk.
 */
private ArrayList<TranslogReader> recoverFromFiles(Checkpoint checkpoint) throws IOException {
    boolean success = false;
    ArrayList<TranslogReader> foundTranslogs = new ArrayList<>();
    try (ReleasableLock ignored = writeLock.acquire()) {
        logger.debug("open uncommitted translog checkpoint {}", checkpoint);
        final long minGenerationToRecoverFrom = checkpoint.minTranslogGeneration;
        // walk the generations from newest to oldest; every generation in the range must be present on disk
        for (long i = checkpoint.generation; i >= minGenerationToRecoverFrom; i--) {
            Path committedTranslogFile = location.resolve(getFilename(i));
            if (Files.exists(committedTranslogFile) == false) {
                throw new TranslogCorruptedException(
                    committedTranslogFile.toString(),
                    "translog file doesn't exist with generation: " + i
                        + " recovering from: " + minGenerationToRecoverFrom
                        + " checkpoint: " + checkpoint.generation
                        + " - translog ids must be consecutive"
                );
            }
            final Checkpoint readerCheckpoint = i == checkpoint.generation
                ? checkpoint
                : Checkpoint.read(location.resolve(getCommitCheckpointFileName(i)));
            final TranslogReader reader = openReader(committedTranslogFile, readerCheckpoint);
            assert reader.getPrimaryTerm() <= primaryTermSupplier.getAsLong() : "Primary terms go backwards; current term ["
                + primaryTermSupplier.getAsLong() + "] translog path [" + committedTranslogFile
                + "], existing term [" + reader.getPrimaryTerm() + "]";
            foundTranslogs.add(reader);
            logger.debug("recovered local translog from checkpoint {}", checkpoint);
        }
        Collections.reverse(foundTranslogs);
        // when we clean up files, we first update the checkpoint with a new minReferencedTranslog and then delete them;
        // if we crash just at the wrong moment, we may leave one unreferenced file behind, so delete it here if present
        IOUtils.deleteFilesIgnoringExceptions(
            location.resolve(getFilename(minGenerationToRecoverFrom - 1)),
            location.resolve(getCommitCheckpointFileName(minGenerationToRecoverFrom - 1))
        );
        Path commitCheckpoint = location.resolve(getCommitCheckpointFileName(checkpoint.generation));
        if (Files.exists(commitCheckpoint)) {
            Checkpoint checkpointFromDisk = Checkpoint.read(commitCheckpoint);
            if (checkpoint.equals(checkpointFromDisk) == false) {
                throw new TranslogCorruptedException(
                    commitCheckpoint.toString(),
                    "checkpoint file " + commitCheckpoint.getFileName() + " already exists but has corrupted content: expected "
                        + checkpoint + " but got " + checkpointFromDisk
                );
            }
        } else {
            copyCheckpointTo(commitCheckpoint);
        }
        success = true;
    } finally {
        if (success == false) {
            IOUtils.closeWhileHandlingException(foundTranslogs);
        }
    }
    return foundTranslogs;
}
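The recovery loop above enforces that translog generations are consecutive: it walks from the checkpoint generation down to the minimum generation to recover from, fails if any file is missing, and then reverses the list so readers end up ordered oldest-first. The following standalone sketch isolates that generation walk; the file naming, paths, and the IOException used for the error are hypothetical and only stand in for the real openReader/TranslogCorruptedException machinery.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Minimal sketch of the generation walk performed by recoverFromFiles.
public class GenerationWalkExample {

    static String filename(long generation) {
        return "translog-" + generation + ".tlog"; // illustrative naming only
    }

    static List<Path> collectGenerations(Path location, long checkpointGen, long minGenToRecoverFrom) throws IOException {
        List<Path> found = new ArrayList<>();
        // newest generation first, so the most recent file is validated before older ones are opened
        for (long gen = checkpointGen; gen >= minGenToRecoverFrom; gen--) {
            Path file = location.resolve(filename(gen));
            if (Files.exists(file) == false) {
                throw new IOException("missing translog generation " + gen + "; translog ids must be consecutive");
            }
            found.add(file);
        }
        Collections.reverse(found); // hand back the generations oldest-first
        return found;
    }

    public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("translog-walk");
        for (long gen = 3; gen <= 5; gen++) {
            Files.createFile(dir.resolve(filename(gen)));
        }
        // prints the three files ordered translog-3, translog-4, translog-5
        System.out.println(collectGenerations(dir, 5, 3));
    }
}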
Use of org.opensearch.common.util.concurrent.ReleasableLock in project OpenSearch by opensearch-project.
Class Translog, method add.
/**
 * Adds an operation to the transaction log.
 *
 * @param operation the operation to add
 * @return the location of the operation in the translog
 * @throws IOException if adding the operation to the translog resulted in an I/O exception
 */
public Location add(final Operation operation) throws IOException {
    final ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(bigArrays);
    try {
        final long start = out.position();
        out.skip(Integer.BYTES);
        writeOperationNoSize(new BufferedChecksumStreamOutput(out), operation);
        final long end = out.position();
        final int operationSize = (int) (end - Integer.BYTES - start);
        out.seek(start);
        out.writeInt(operationSize);
        out.seek(end);
        final BytesReference bytes = out.bytes();
        try (ReleasableLock ignored = readLock.acquire()) {
            ensureOpen();
            if (operation.primaryTerm() > current.getPrimaryTerm()) {
                assert false : "Operation term is newer than the current term; current term["
                    + current.getPrimaryTerm() + "], operation term[" + operation + "]";
                throw new IllegalArgumentException(
                    "Operation term is newer than the current term; current term["
                        + current.getPrimaryTerm() + "], operation term[" + operation + "]"
                );
            }
            return current.add(bytes, operation.seqNo());
        }
    } catch (final AlreadyClosedException | IOException ex) {
        closeOnTragicEvent(ex);
        throw ex;
    } catch (final Exception ex) {
        closeOnTragicEvent(ex);
        throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", ex);
    } finally {
        Releasables.close(out);
    }
}
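Before the lock is taken, add serializes the operation with a size prefix: it reserves Integer.BYTES, writes the operation, computes the length, seeks back to fill in the prefix, and then seeks to the end again. The sketch below reproduces just that framing step with a plain ByteBuffer; the fixed-size buffer and the String payload are illustrative assumptions, whereas the real code writes into a ReleasableBytesStreamOutput through a checksumming wrapper.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Minimal sketch of the size-prefix framing used by Translog.add.
public class SizePrefixFramingExample {

    static ByteBuffer frame(String operationPayload) {
        byte[] payload = operationPayload.getBytes(StandardCharsets.UTF_8);
        ByteBuffer out = ByteBuffer.allocate(Integer.BYTES + payload.length);
        int start = out.position();
        out.position(start + Integer.BYTES);   // skip(Integer.BYTES): leave room for the length
        out.put(payload);                      // writeOperationNoSize(...)
        int end = out.position();
        int operationSize = end - Integer.BYTES - start;
        out.putInt(start, operationSize);      // seek(start); writeInt(operationSize)
        out.position(end);                     // seek(end)
        out.flip();
        return out;
    }

    public static void main(String[] args) {
        ByteBuffer framed = frame("index {doc-1}");
        System.out.println("prefixed length = " + framed.getInt()); // 13
    }
}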
Use of org.opensearch.common.util.concurrent.ReleasableLock in project OpenSearch by opensearch-project.
Class NRTReplicationEngine, method rollTranslogGeneration.
@Override
public void rollTranslogGeneration() throws EngineException {
    try (ReleasableLock ignored = readLock.acquire()) {
        ensureOpen();
        translog.rollGeneration();
        translog.trimUnreferencedReaders();
    } catch (Exception e) {
        try {
            failEngine("translog trimming failed", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw new EngineException(shardId, "failed to roll translog", e);
    }
}
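Note the failure-handling shape: if rolling or trimming throws, the engine is failed, and any secondary exception from failEngine is attached as a suppressed exception so the original cause is not lost. A self-contained sketch of that pattern follows; all names (rollGeneration, failEngine, the RuntimeException wrapper) are illustrative placeholders, not the OpenSearch API.

// Minimal sketch of the fail-the-engine-and-rethrow pattern used above.
public class FailEngineOnErrorExample {

    static void rollGeneration() {
        throw new IllegalStateException("simulated roll failure");
    }

    static void failEngine(String reason, Exception cause) {
        System.out.println("failing engine: " + reason);
    }

    static void rollTranslogGenerationSafely() {
        try {
            rollGeneration();
        } catch (Exception e) {
            try {
                failEngine("translog trimming failed", e);
            } catch (Exception inner) {
                e.addSuppressed(inner); // keep the original failure as the primary cause
            }
            throw new RuntimeException("failed to roll translog", e);
        }
    }

    public static void main(String[] args) {
        try {
            rollTranslogGenerationSafely();
        } catch (RuntimeException ex) {
            System.out.println(ex.getMessage() + " caused by: " + ex.getCause().getMessage());
        }
    }
}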
Use of org.opensearch.common.util.concurrent.ReleasableLock in project OpenSearch by opensearch-project.
Class InternalEngine, method flush.
@Override
public void flush(boolean force, boolean waitIfOngoing) throws EngineException {
    ensureOpen();
    if (force && waitIfOngoing == false) {
        assert false : "wait_if_ongoing must be true for a force flush: force=" + force + " wait_if_ongoing=" + waitIfOngoing;
        throw new IllegalArgumentException("wait_if_ongoing must be true for a force flush: force=" + force + " wait_if_ongoing=" + waitIfOngoing);
    }
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        if (flushLock.tryLock() == false) {
            // if we can't get the lock right away we either block (waitIfOngoing) or bail out
            if (waitIfOngoing == false) {
                return;
            }
            logger.trace("waiting for in-flight flush to finish");
            flushLock.lock();
            logger.trace("acquired flush lock after blocking");
        } else {
            logger.trace("acquired flush lock immediately");
        }
        try {
            // Only flush if (1) Lucene has uncommitted docs, or (2) forced by caller, or (3) the
            // newly created commit points to a different translog generation (can free translog),
            // or (4) the local checkpoint information in the last commit is stale, which slows down future recoveries.
            boolean hasUncommittedChanges = indexWriter.hasUncommittedChanges();
            boolean shouldPeriodicallyFlush = shouldPeriodicallyFlush();
            if (hasUncommittedChanges
                || force
                || shouldPeriodicallyFlush
                || getProcessedLocalCheckpoint() > Long.parseLong(lastCommittedSegmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY))) {
                ensureCanFlush();
                try {
                    translog.rollGeneration();
                    logger.trace("starting commit for flush; commitTranslog=true");
                    commitIndexWriter(indexWriter, translog);
                    logger.trace("finished commit for flush");
                    // temporary debugging to investigate a test failure - issue#32827. Remove when the issue is resolved
                    logger.debug("new commit on flush, hasUncommittedChanges:{}, force:{}, shouldPeriodicallyFlush:{}", hasUncommittedChanges, force, shouldPeriodicallyFlush);
                    // we need to refresh in order to clear older version values
                    refresh("version_table_flush", SearcherScope.INTERNAL, true);
                    translog.trimUnreferencedReaders();
                } catch (AlreadyClosedException e) {
                    failOnTragicEvent(e);
                    throw e;
                } catch (Exception e) {
                    throw new FlushFailedEngineException(shardId, e);
                }
                refreshLastCommittedSegmentInfos();
            }
        } catch (FlushFailedEngineException ex) {
            maybeFailEngine("flush", ex);
            throw ex;
        } finally {
            flushLock.unlock();
        }
    }
    // prune deleted tombstones defensively here so that, even if wall clock time misbehaves
    // (e.g., moves backwards), we will at least still sometimes prune them:
    if (engineConfig.isEnableGcDeletes()) {
        pruneDeletedTombstones();
    }
}
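The flush-lock handshake is worth isolating: tryLock() attempts a non-blocking acquisition; if another flush is already running, the method either returns immediately (waitIfOngoing == false) or blocks on lock() until the in-flight flush finishes. Below is a minimal, self-contained sketch of that handshake using a plain ReentrantLock; doFlush is a placeholder for the commit/roll/trim work done by InternalEngine.

import java.util.concurrent.locks.ReentrantLock;

// Minimal sketch of the tryLock / waitIfOngoing handshake used by flush.
public class FlushLockExample {
    private static final ReentrantLock flushLock = new ReentrantLock();

    static void flush(boolean waitIfOngoing) {
        if (flushLock.tryLock() == false) {
            if (waitIfOngoing == false) {
                return;                 // another flush is running; skip this one
            }
            flushLock.lock();           // block until the in-flight flush completes
        }
        try {
            doFlush();
        } finally {
            flushLock.unlock();
        }
    }

    static void doFlush() {
        System.out.println("flushing on " + Thread.currentThread().getName());
    }

    public static void main(String[] args) throws InterruptedException {
        Thread t = new Thread(() -> flush(true), "background-flush");
        t.start();
        flush(false); // may no-op if the background flush already holds the lock
        t.join();
    }
}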