Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.flush.FlushRequest in project crate by crate.
In the class TransportVerifyShardBeforeCloseAction, the method executeShardOperation:
private void executeShardOperation(final ShardRequest request, final IndexShard indexShard) throws IOException {
    final ShardId shardId = indexShard.shardId();
    if (indexShard.getActiveOperationsCount() != IndexShard.OPERATIONS_BLOCKED) {
        throw new IllegalStateException("Index shard " + shardId + " is not blocking all operations during closing");
    }
    final ClusterBlocks clusterBlocks = clusterService.state().blocks();
    if (clusterBlocks.hasIndexBlock(shardId.getIndexName(), request.clusterBlock()) == false) {
        throw new IllegalStateException("Index shard " + shardId + " must be blocked by " + request.clusterBlock() + " before closing");
    }
    if (request.isPhase1()) {
        // in order to advance the global checkpoint to the maximum sequence number, the (persisted) local checkpoint needs to be
        // advanced first, which, when using async translog syncing, does not automatically hold at the time where we have acquired
        // all operation permits. Instead, this requires an explicit sync, which communicates the updated (persisted) local checkpoint
        // to the primary (we call this phase1), and phase2 can then use the fact that the global checkpoint has moved to the maximum
        // sequence number to pass the verifyShardBeforeIndexClosing check and create a safe commit where the maximum sequence number
        // is equal to the global checkpoint.
        indexShard.sync();
    } else {
        indexShard.verifyShardBeforeIndexClosing();
        indexShard.flush(new FlushRequest().force(true).waitIfOngoing(true));
        logger.trace("{} shard is ready for closing", shardId);
    }
}
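For context, FlushRequest is a fluent request object: force(true) asks for a commit even when there are no uncommitted changes, and waitIfOngoing(true) waits for any already-running flush to finish before executing. A minimal, self-contained sketch of those options, using the unshaded Elasticsearch package; the index name "t1" is a placeholder, not taken from the crate source:

import org.elasticsearch.action.admin.indices.flush.FlushRequest;

public class FlushRequestSketch {

    // Builds a forced flush request with the same options used in executeShardOperation;
    // the index name "t1" is a placeholder for illustration only.
    static FlushRequest forcedFlush() {
        return new FlushRequest("t1")
            .force(true)          // commit even if there are no uncommitted changes
            .waitIfOngoing(true); // wait for a concurrent flush to finish first
    }
}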
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.flush.FlushRequest in project crate by crate.
In the class IndexShard, the method resetEngineToGlobalCheckpoint:
/**
* Rollback the current engine to the safe commit, then replay local translog up to the global checkpoint.
*/
void resetEngineToGlobalCheckpoint() throws IOException {
    assert Thread.holdsLock(mutex) == false : "resetting engine under mutex";
    assert getActiveOperationsCount() == OPERATIONS_BLOCKED
        : "resetting engine without blocking operations; active operations are [" + getActiveOperations() + ']';
    // persist the global checkpoint to disk
    sync();
    final SeqNoStats seqNoStats = seqNoStats();
    final TranslogStats translogStats = translogStats();
    // flush to make sure the latest commit, which will be opened by the read-only engine, includes all operations.
    flush(new FlushRequest().waitIfOngoing(true));
    SetOnce<Engine> newEngineReference = new SetOnce<>();
    final long globalCheckpoint = getLastKnownGlobalCheckpoint();
    assert globalCheckpoint == getLastSyncedGlobalCheckpoint();
    synchronized (engineMutex) {
        verifyNotClosed();
        // we must create both the new read-only engine and the new read-write engine under engineMutex to ensure
        // that snapshotStoreMetadata, acquireXXXCommit and close work.
        final Engine readOnlyEngine = new ReadOnlyEngine(newEngineConfig(replicationTracker), seqNoStats, translogStats, false, Function.identity()) {

            @Override
            public IndexCommitRef acquireLastIndexCommit(boolean flushFirst) {
                synchronized (engineMutex) {
                    if (newEngineReference.get() == null) {
                        throw new AlreadyClosedException("engine was closed");
                    }
                    // ignore flushFirst since we flushed above and we do not want to interfere with ongoing translog replay
                    return newEngineReference.get().acquireLastIndexCommit(false);
                }
            }

            @Override
            public IndexCommitRef acquireSafeIndexCommit() {
                synchronized (engineMutex) {
                    if (newEngineReference.get() == null) {
                        throw new AlreadyClosedException("engine was closed");
                    }
                    return newEngineReference.get().acquireSafeIndexCommit();
                }
            }

            @Override
            public void close() throws IOException {
                assert Thread.holdsLock(engineMutex);
                Engine newEngine = newEngineReference.get();
                if (newEngine == currentEngineReference.get()) {
                    // we successfully installed the new engine so do not close it.
                    newEngine = null;
                }
                IOUtils.close(super::close, newEngine);
            }
        };
        IOUtils.close(currentEngineReference.getAndSet(readOnlyEngine));
        newEngineReference.set(engineFactory.newReadWriteEngine(newEngineConfig(replicationTracker)));
        onNewEngine(newEngineReference.get());
    }
    final Engine.TranslogRecoveryRunner translogRunner = (engine, snapshot) -> runTranslogRecovery(
        engine, snapshot, Engine.Operation.Origin.LOCAL_RESET, () -> {
            // TODO: add dedicated recovery stats for the reset translog
        });
    newEngineReference.get().recoverFromTranslog(translogRunner, globalCheckpoint);
    newEngineReference.get().refresh("reset_engine");
    synchronized (engineMutex) {
        verifyNotClosed();
        IOUtils.close(currentEngineReference.getAndSet(newEngineReference.get()));
        // We set active because we are now writing operations to the engine; this way,
        // if we go idle after some time and become inactive, we still give sync'd flush a chance to run.
        active.set(true);
    }
    // time elapses after the engine is created above (pulling the config settings) until we set the engine reference, during
    // which settings changes could possibly have happened, so here we forcefully push any config changes to the new engine.
    onSettingsChanged();
}
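The SetOnce hand-off above is what lets the read-only placeholder delegate commit acquisition to a read-write engine that does not exist yet when the placeholder is constructed. A minimal, self-contained sketch of that pattern, using Lucene's SetOnce; the string value and messages are placeholders unrelated to the crate source:

import org.apache.lucene.util.SetOnce;

public class SetOnceSketch {

    public static void main(String[] args) {
        final SetOnce<String> newEngineRef = new SetOnce<>();

        // Captured before the value exists, mirroring how the anonymous ReadOnlyEngine
        // above closes over newEngineReference before the read-write engine is built.
        final Runnable delegate = () -> {
            if (newEngineRef.get() == null) {
                throw new IllegalStateException("engine was closed");
            }
            System.out.println("delegating to: " + newEngineRef.get());
        };

        newEngineRef.set("read-write engine"); // SetOnce allows exactly one assignment
        delegate.run();                        // now resolves to the real value
    }
}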
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.flush.FlushRequest in project graylog2-server by Graylog2.
In the class IndicesAdapterES7, the method flush:
@Override
public void flush(String index) {
    final FlushRequest request = new FlushRequest(index);
    client.execute((c, requestOptions) -> c.indices().flush(request, requestOptions), "Unable to flush index " + index);
}
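For comparison, a minimal, self-contained sketch of the high-level REST client call that this adapter method wraps, written against the unshaded Elasticsearch 7 client for readability; the host, port, and index name are placeholders, and Graylog wires its client differently:

import org.apache.http.HttpHost;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

public class FlushIndexSketch {

    public static void main(String[] args) throws Exception {
        // Placeholder connection settings for illustration only.
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            // Same call shape as c.indices().flush(request, requestOptions) above.
            FlushResponse response = client.indices()
                .flush(new FlushRequest("graylog_0"), RequestOptions.DEFAULT);
            System.out.println("successful shards: " + response.getSuccessfulShards());
        }
    }
}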