Use of org.apache.lucene.store.AlreadyClosedException in project crate by crate.
The class Translog, method add.
/**
* Adds an operation to the transaction log.
*
* @param operation the operation to add
* @return the location of the operation in the translog
* @throws IOException if adding the operation to the translog resulted in an I/O exception
*/
public Location add(final Operation operation) throws IOException {
    final ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(bigArrays);
    boolean successfullySerialized = false;
    try {
        final long start = out.position();
        out.skip(Integer.BYTES);
        writeOperationNoSize(new BufferedChecksumStreamOutput(out), operation);
        final long end = out.position();
        final int operationSize = (int) (end - Integer.BYTES - start);
        out.seek(start);
        out.writeInt(operationSize);
        out.seek(end);
        successfullySerialized = true;
        try (ReleasableBytesReference bytes = new ReleasableBytesReference(out.bytes(), out);
             ReleasableLock ignored = readLock.acquire()) {
            ensureOpen();
            if (operation.primaryTerm() > current.getPrimaryTerm()) {
                assert false : "Operation term is newer than the current term; "
                    + "current term[" + current.getPrimaryTerm() + "], operation term[" + operation + "]";
                throw new IllegalArgumentException("Operation term is newer than the current term; "
                    + "current term[" + current.getPrimaryTerm() + "], operation term[" + operation + "]");
            }
            return current.add(bytes, operation.seqNo());
        }
    } catch (final AlreadyClosedException | IOException ex) {
        closeOnTragicEvent(ex);
        throw ex;
    } catch (final Exception ex) {
        closeOnTragicEvent(ex);
        throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", ex);
    } finally {
        if (successfullySerialized == false) {
            Releasables.close(out);
        }
    }
}
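The size-prefix trick in add (skip Integer.BYTES, serialize the operation, seek back, write the length) is a general way to frame variable-length records whose size is only known after serialization. Below is a minimal, self-contained sketch of the same pattern using a plain java.nio.ByteBuffer; the Framing class and string payload are hypothetical illustrations, not part of Crate:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Hypothetical illustration of the "reserve length slot, backfill after writing"
// pattern used by Translog.add: the payload size is only known once serialization
// is done, so a slot is reserved up front and filled in afterwards.
public class Framing {

    public static ByteBuffer frame(String payload) {
        byte[] body = payload.getBytes(StandardCharsets.UTF_8);
        ByteBuffer out = ByteBuffer.allocate(Integer.BYTES + body.length);
        int start = out.position();
        out.position(start + Integer.BYTES);            // like out.skip(Integer.BYTES)
        out.put(body);                                   // serialize the "operation"
        int end = out.position();
        out.putInt(start, end - Integer.BYTES - start);  // seek back and write the size
        out.position(end);
        out.flip();
        return out;
    }

    public static void main(String[] args) {
        ByteBuffer framed = frame("op-1");
        System.out.println("length prefix = " + framed.getInt()); // prints 4
    }
}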
Use of org.apache.lucene.store.AlreadyClosedException in project crate by crate.
The class TranslogReader, method closeIntoTrimmedReader.
/**
 * Closes the current reader and creates a new one with a new checkpoint and the same file channel.
 */
TranslogReader closeIntoTrimmedReader(long aboveSeqNo, ChannelFactory channelFactory) throws IOException {
    if (closed.compareAndSet(false, true)) {
        Closeable toCloseOnFailure = channel;
        final TranslogReader newReader;
        try {
            if (aboveSeqNo < checkpoint.trimmedAboveSeqNo
                || aboveSeqNo < checkpoint.maxSeqNo && checkpoint.trimmedAboveSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO) {
                final Path checkpointFile = path.getParent().resolve(getCommitCheckpointFileName(checkpoint.generation));
                final Checkpoint newCheckpoint = new Checkpoint(checkpoint.offset, checkpoint.numOps, checkpoint.generation,
                    checkpoint.minSeqNo, checkpoint.maxSeqNo, checkpoint.globalCheckpoint,
                    checkpoint.minTranslogGeneration, aboveSeqNo);
                Checkpoint.write(channelFactory, checkpointFile, newCheckpoint, StandardOpenOption.WRITE);
                IOUtils.fsync(checkpointFile, false);
                IOUtils.fsync(checkpointFile.getParent(), true);
                newReader = new TranslogReader(newCheckpoint, channel, path, header);
            } else {
                newReader = new TranslogReader(checkpoint, channel, path, header);
            }
            toCloseOnFailure = null;
            return newReader;
        } finally {
            IOUtils.close(toCloseOnFailure);
        }
    } else {
        throw new AlreadyClosedException(toString() + " is already closed");
    }
}
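The compareAndSet(false, true) at the top of closeIntoTrimmedReader is the standard close-once idiom: exactly one caller wins the state transition and performs the work, while every later caller is rejected with an AlreadyClosedException. A minimal sketch of the idiom, with a hypothetical resource class standing in for the reader:

import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.store.AlreadyClosedException;

// Hypothetical resource showing the close-once idiom from closeIntoTrimmedReader:
// compareAndSet guarantees the closing work runs at most once, even under races.
class CloseOnceResource implements AutoCloseable {

    private final AtomicBoolean closed = new AtomicBoolean(false);

    void ensureOpen() {
        if (closed.get()) {
            throw new AlreadyClosedException(this + " is already closed");
        }
    }

    @Override
    public void close() {
        if (closed.compareAndSet(false, true)) {
            // release file channels, buffers, ... exactly once
        }
        // losers of the race simply return; closing twice is a no-op
    }
}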
Use of org.apache.lucene.store.AlreadyClosedException in project crate by crate.
The class ShardSegments, method buildShardSegment.
private Stream<ShardSegment> buildShardSegment(IndexShard indexShard) {
    try {
        List<Segment> segments = indexShard.segments(false);
        ShardId shardId = indexShard.shardId();
        return segments.stream()
            .map(sgmt -> new ShardSegment(shardId.getId(), shardId.getIndexName(), sgmt, indexShard.routingEntry().primary()));
    } catch (AlreadyClosedException ignored) {
        return Stream.empty();
    }
}
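Here AlreadyClosedException is treated as a benign race: the shard was closed between being listed and being inspected, so it simply contributes no segments instead of failing the whole request. If several call sites need the same behavior, it can be factored into a small helper; the following ClosedSafe class and its names are illustrative, not Crate API:

import java.util.function.Supplier;
import java.util.stream.Stream;
import org.apache.lucene.store.AlreadyClosedException;

// Hypothetical helper generalizing buildShardSegment's handling: a shard that
// closes concurrently contributes nothing instead of failing the request.
final class ClosedSafe {

    static <T> Stream<T> streamOrEmpty(Supplier<Stream<T>> supplier) {
        try {
            return supplier.get();
        } catch (AlreadyClosedException ignored) {
            return Stream.empty();
        }
    }
}

A caller could then wrap the body of buildShardSegment as streamOrEmpty(() -> ...) and flatMap the per-shard results.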
Use of org.apache.lucene.store.AlreadyClosedException in project crate by crate.
The class ReservoirSampler, method getSamples.
private Samples getSamples(List<Reference> columns,
                           int maxSamples,
                           DocTableInfo docTable,
                           Random random,
                           Metadata metadata,
                           CoordinatorTxnCtx coordinatorTxnCtx,
                           List<Streamer> streamers,
                           List<Engine.Searcher> searchersToRelease,
                           RamAccounting ramAccounting) {
    ramAccounting.addBytes(DataTypes.LONG.fixedSize() * maxSamples);
    Reservoir<Long> fetchIdSamples = new Reservoir<>(maxSamples, random);
    ArrayList<DocIdToRow> docIdToRowsFunctionPerReader = new ArrayList<>();
    long totalNumDocs = 0;
    long totalSizeInBytes = 0;
    for (String index : docTable.concreteOpenIndices()) {
        var indexMetadata = metadata.index(index);
        if (indexMetadata == null) {
            continue;
        }
        var indexService = indicesService.indexService(indexMetadata.getIndex());
        if (indexService == null) {
            continue;
        }
        var mapperService = indexService.mapperService();
        FieldTypeLookup fieldTypeLookup = mapperService::fullName;
        var ctx = new DocInputFactory(
            nodeCtx,
            new LuceneReferenceResolver(indexService.index().getName(), fieldTypeLookup, docTable.partitionedByColumns())
        ).getCtx(coordinatorTxnCtx);
        ctx.add(columns);
        List<Input<?>> inputs = ctx.topLevelInputs();
        List<? extends LuceneCollectorExpression<?>> expressions = ctx.expressions();
        CollectorContext collectorContext = new CollectorContext();
        for (LuceneCollectorExpression<?> expression : expressions) {
            expression.startCollect(collectorContext);
        }
        for (IndexShard indexShard : indexService) {
            if (!indexShard.routingEntry().primary()) {
                continue;
            }
            try {
                Engine.Searcher searcher = indexShard.acquireSearcher("update-table-statistics");
                searchersToRelease.add(searcher);
                totalNumDocs += searcher.getIndexReader().numDocs();
                totalSizeInBytes += indexShard.storeStats().getSizeInBytes();
                DocIdToRow docIdToRow = new DocIdToRow(searcher, inputs, expressions);
                docIdToRowsFunctionPerReader.add(docIdToRow);
                try {
                    // We do the sampling in two phases: first we collect the docIds,
                    // then we retrieve the column values for the sampled docIds.
                    // Two phases are needed because reservoir sampling may overwrite
                    // previously seen items, and we want to avoid unnecessary disk lookups.
                    var collector = new ReservoirCollector(fetchIdSamples, searchersToRelease.size() - 1);
                    searcher.search(new MatchAllDocsQuery(), collector);
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            } catch (IllegalIndexShardStateException | AlreadyClosedException ignored) {
            }
        }
    }
    var rowAccounting = new RowCellsAccountingWithEstimators(Symbols.typeView(columns), ramAccounting, 0);
    ArrayList<Row> records = new ArrayList<>();
    for (long fetchId : fetchIdSamples.samples()) {
        int readerId = FetchId.decodeReaderId(fetchId);
        DocIdToRow docIdToRow = docIdToRowsFunctionPerReader.get(readerId);
        Object[] row = docIdToRow.apply(FetchId.decodeDocId(fetchId));
        try {
            rowAccounting.accountForAndMaybeBreak(row);
        } catch (CircuitBreakingException e) {
            LOGGER.info("Stopped gathering samples for `ANALYZE` operation because circuit breaker triggered. "
                + "Generating statistics with {} instead of {} records", records.size(), maxSamples);
            break;
        }
        records.add(new RowN(row));
    }
    return new Samples(records, streamers, totalNumDocs, totalSizeInBytes);
}
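The Reservoir consumed by ReservoirCollector implements reservoir sampling: the first maxSamples fetch ids are kept unconditionally, and each later item replaces a random slot with probability maxSamples / itemsSeen, so every document has an equal chance of ending up in the sample regardless of stream length. Below is a self-contained sketch of the textbook algorithm (Algorithm R); it mirrors the behavior relied on above but is not Crate's exact Reservoir implementation:

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

// Textbook Algorithm R: keeps a uniform random sample of fixed size from a
// stream of unknown length, replacing earlier picks with decreasing probability.
class Reservoir<T> {

    private final List<T> samples;
    private final int maxSize;
    private final Random random;
    private long itemsSeen = 0;

    Reservoir(int maxSize, Random random) {
        this.samples = new ArrayList<>(maxSize);
        this.maxSize = maxSize;
        this.random = random;
    }

    void update(T item) {
        itemsSeen++;
        if (samples.size() < maxSize) {
            samples.add(item);   // fill phase: keep everything until the reservoir is full
        } else {
            // replace a random slot with probability maxSize / itemsSeen
            long slot = (long) (random.nextDouble() * itemsSeen);
            if (slot < maxSize) {
                samples.set((int) slot, item);
            }
        }
    }

    List<T> samples() {
        return samples;
    }
}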
Use of org.apache.lucene.store.AlreadyClosedException in project crate by crate.
The class IndexShard, method resetEngineToGlobalCheckpoint.
/**
* Rollback the current engine to the safe commit, then replay local translog up to the global checkpoint.
*/
void resetEngineToGlobalCheckpoint() throws IOException {
    assert Thread.holdsLock(mutex) == false : "resetting engine under mutex";
    assert getActiveOperationsCount() == OPERATIONS_BLOCKED
        : "resetting engine without blocking operations; active operations are [" + getActiveOperations() + ']';
    // persist the global checkpoint to disk
    sync();
    final SeqNoStats seqNoStats = seqNoStats();
    final TranslogStats translogStats = translogStats();
    // flush to make sure the latest commit, which will be opened by the read-only engine, includes all operations.
    flush(new FlushRequest().waitIfOngoing(true));
    SetOnce<Engine> newEngineReference = new SetOnce<>();
    final long globalCheckpoint = getLastKnownGlobalCheckpoint();
    assert globalCheckpoint == getLastSyncedGlobalCheckpoint();
    synchronized (engineMutex) {
        verifyNotClosed();
        // we must create both new read-only engine and new read-write engine under engineMutex to ensure
        // snapshotStoreMetadata, acquireXXXCommit and close works.
        final Engine readOnlyEngine =
            new ReadOnlyEngine(newEngineConfig(replicationTracker), seqNoStats, translogStats, false, Function.identity()) {

                @Override
                public IndexCommitRef acquireLastIndexCommit(boolean flushFirst) {
                    synchronized (engineMutex) {
                        if (newEngineReference.get() == null) {
                            throw new AlreadyClosedException("engine was closed");
                        }
                        // ignore flushFirst since we flushed above and we do not want to interfere with ongoing translog replay
                        return newEngineReference.get().acquireLastIndexCommit(false);
                    }
                }

                @Override
                public IndexCommitRef acquireSafeIndexCommit() {
                    synchronized (engineMutex) {
                        if (newEngineReference.get() == null) {
                            throw new AlreadyClosedException("engine was closed");
                        }
                        return newEngineReference.get().acquireSafeIndexCommit();
                    }
                }

                @Override
                public void close() throws IOException {
                    assert Thread.holdsLock(engineMutex);
                    Engine newEngine = newEngineReference.get();
                    if (newEngine == currentEngineReference.get()) {
                        // we successfully installed the new engine so do not close it.
                        newEngine = null;
                    }
                    IOUtils.close(super::close, newEngine);
                }
            };
        IOUtils.close(currentEngineReference.getAndSet(readOnlyEngine));
        newEngineReference.set(engineFactory.newReadWriteEngine(newEngineConfig(replicationTracker)));
        onNewEngine(newEngineReference.get());
    }
    final Engine.TranslogRecoveryRunner translogRunner = (engine, snapshot) -> runTranslogRecovery(
        engine, snapshot, Engine.Operation.Origin.LOCAL_RESET, () -> {
            // TODO: add dedicated recovery stats for the reset translog
        });
    newEngineReference.get().recoverFromTranslog(translogRunner, globalCheckpoint);
    newEngineReference.get().refresh("reset_engine");
    synchronized (engineMutex) {
        verifyNotClosed();
        IOUtils.close(currentEngineReference.getAndSet(newEngineReference.get()));
        // We set active because we are now writing operations to the engine; this way,
        // if we go idle after some time and become inactive, we still give sync'd flush a chance to run.
        active.set(true);
    }
    // time elapses after the engine is created above (pulling the config settings) until we set the engine reference,
    // during which settings changes could possibly have happened, so here we forcefully push any config changes to the new engine.
    onSettingsChanged();
}
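While the placeholder ReadOnlyEngine is installed, acquireLastIndexCommit and acquireSafeIndexCommit delegate to the SetOnce reference under engineMutex and throw AlreadyClosedException if the replacement engine does not exist yet, so callers racing ahead of the swap see the same failure as from a genuinely closed engine. Stripped to its core, the guard looks like the sketch below; EngineHandle and its methods are hypothetical names, not the IndexShard API:

import org.apache.lucene.store.AlreadyClosedException;

// Hypothetical sketch of the delegate-or-fail guard used by the placeholder
// read-only engine: until the replacement is installed, every acquire attempt
// fails with the same AlreadyClosedException a closed engine would throw.
class EngineHandle<E> {

    private final Object mutex = new Object();
    private E delegate; // set once the replacement engine is ready

    void install(E engine) {
        synchronized (mutex) {
            delegate = engine;
        }
    }

    E acquire() {
        synchronized (mutex) {
            if (delegate == null) {
                throw new AlreadyClosedException("engine was closed");
            }
            return delegate;
        }
    }
}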