Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.flush.FlushRequest in project crate by crate.
In the class IndexShard, the method afterWriteOperation:
/**
 * Schedules a flush or translog generation roll if needed but will not schedule more than one concurrently. The operation will be
 * executed asynchronously on the flush thread pool.
 */
public void afterWriteOperation() {
    if (shouldPeriodicallyFlush() || shouldRollTranslogGeneration()) {
        if (flushOrRollRunning.compareAndSet(false, true)) {
            /*
             * We have to check again since otherwise there is a race when a thread passes the first check next to another thread which
             * performs the operation quickly enough to finish before the current thread could flip the flag. In that situation, we
             * have an extra operation.
             *
             * Additionally, a flush implicitly executes a translog generation roll so if we execute a flush then we do not need to
             * check if we should roll the translog generation.
             */
            if (shouldPeriodicallyFlush()) {
                logger.debug("submitting async flush request");
                final AbstractRunnable flush = new AbstractRunnable() {
                    @Override
                    public void onFailure(final Exception e) {
                        if (state != IndexShardState.CLOSED) {
                            logger.warn("failed to flush index", e);
                        }
                    }

                    @Override
                    protected void doRun() throws IOException {
                        flush(new FlushRequest());
                        periodicFlushMetric.inc();
                    }

                    @Override
                    public void onAfter() {
                        flushOrRollRunning.compareAndSet(true, false);
                        afterWriteOperation();
                    }
                };
                threadPool.executor(ThreadPool.Names.FLUSH).execute(flush);
            } else if (shouldRollTranslogGeneration()) {
                logger.debug("submitting async roll translog generation request");
                final AbstractRunnable roll = new AbstractRunnable() {
                    @Override
                    public void onFailure(final Exception e) {
                        if (state != IndexShardState.CLOSED) {
                            logger.warn("failed to roll translog generation", e);
                        }
                    }

                    @Override
                    protected void doRun() throws Exception {
                        rollTranslogGeneration();
                    }

                    @Override
                    public void onAfter() {
                        flushOrRollRunning.compareAndSet(true, false);
                        afterWriteOperation();
                    }
                };
                threadPool.executor(ThreadPool.Names.FLUSH).execute(roll);
            } else {
                flushOrRollRunning.compareAndSet(true, false);
            }
        }
    }
}
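The gating around flushOrRollRunning is the interesting part: a single AtomicBoolean ensures at most one flush or roll is in flight, and the condition is re-checked after winning the compare-and-set so an operation that another thread just finished is not duplicated. Below is a minimal, self-contained sketch of the same pattern; needsWork and doWork are hypothetical placeholders standing in for shouldPeriodicallyFlush and the flush itself, and the work here runs synchronously rather than on a flush thread pool.

import java.util.concurrent.atomic.AtomicBoolean;

public class FlushGateSketch {
    private final AtomicBoolean flushOrRollRunning = new AtomicBoolean(false);

    void afterWrite() {
        if (needsWork() && flushOrRollRunning.compareAndSet(false, true)) {
            // Re-check: another thread may have completed the work between our
            // first check and winning the CAS.
            if (needsWork()) {
                try {
                    doWork();
                } finally {
                    flushOrRollRunning.compareAndSet(true, false);
                    afterWrite(); // pick up work that arrived while we were busy
                }
            } else {
                flushOrRollRunning.compareAndSet(true, false);
            }
        }
    }

    boolean needsWork() { return false; } // placeholder for shouldPeriodicallyFlush()
    void doWork() {}                      // placeholder for flush(new FlushRequest())
}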
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.flush.FlushRequest in project crate by crate.
In the class SyncedFlushService, the method performPreSyncedFlush:
private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) {
    IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
    FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true);
    LOGGER.trace("{} performing pre sync flush", request.shardId());
    indexShard.flush(flushRequest);
    final CommitStats commitStats = indexShard.commitStats();
    final Engine.CommitId commitId = commitStats.getRawCommitId();
    LOGGER.trace("{} pre sync flush done. commit id {}, num docs {}", request.shardId(), commitId, commitStats.getNumDocs());
    return new PreSyncedFlushResponse(commitId, commitStats.getNumDocs(), commitStats.syncId());
}
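The FlushRequest here is tuned for the sync-flush handshake: force(false) avoids creating a new commit when nothing has changed (which would invalidate the commit id the method is about to report), and waitIfOngoing(true) blocks behind any concurrent flush instead of failing. A hedged illustration of the two configurations this service uses; the shaded import path follows the page title and is an assumption of the Graylog build.

import org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.flush.FlushRequest;

public class FlushRequestOptions {
    public static void main(String[] args) {
        // Pre-sync flush: do not force a commit if nothing changed, but wait
        // for any flush that is already in flight before returning.
        FlushRequest preSync = new FlushRequest().force(false).waitIfOngoing(true);

        // Inactive-shard flush: likewise unforced, but bail out instead of
        // waiting if another flush is already running.
        FlushRequest onInactive = new FlushRequest().force(false).waitIfOngoing(false);

        System.out.println("preSync waits: " + preSync.waitIfOngoing());
        System.out.println("onInactive waits: " + onInactive.waitIfOngoing());
    }
}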
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.flush.FlushRequest in project crate by crate.
In the class SyncedFlushService, the method performNormalFlushOnInactive:
private void performNormalFlushOnInactive(IndexShard shard) {
    LOGGER.debug("flushing shard {} on inactive", shard.routingEntry());
    shard.getThreadPool().executor(ThreadPool.Names.FLUSH).execute(new AbstractRunnable() {
        @Override
        public void onFailure(Exception e) {
            if (shard.state() != IndexShardState.CLOSED) {
                LOGGER.warn(new ParameterizedMessage("failed to flush shard {} on inactive", shard.routingEntry()), e);
            }
        }

        @Override
        protected void doRun() {
            shard.flush(new FlushRequest().force(false).waitIfOngoing(false));
        }
    });
}
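Note the contrast with the pre-synced flush above: waitIfOngoing(false) makes this a best-effort flush that gives up if another flush is already running, which is acceptable for an idle shard, and the failure handler only logs while the shard is still open so errors from a concurrently closing shard stay quiet. A plain-JDK sketch of that shape; the Shard class is a hypothetical stand-in, not the Elasticsearch IndexShard or ThreadPool API.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class InactiveFlushSketch {
    enum State { STARTED, CLOSED }

    // Hypothetical stand-in for IndexShard: a state flag plus a flush that may throw.
    static class Shard {
        volatile State state = State.STARTED;
        void flush() { /* best-effort flush; may fail if one is already running */ }
    }

    public static void main(String[] args) {
        Shard shard = new Shard();
        ExecutorService flushPool = Executors.newSingleThreadExecutor(); // stands in for the FLUSH thread pool
        flushPool.execute(() -> {
            try {
                shard.flush();
            } catch (Exception e) {
                if (shard.state != State.CLOSED) { // only complain while the shard is still open
                    System.err.println("failed to flush shard on inactive: " + e);
                }
            }
        });
        flushPool.shutdown();
    }
}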
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.flush.FlushRequest in project crate by crate.
In the class PeerRecoveryTargetServiceTests, the method populateRandomData:
private SeqNoStats populateRandomData(IndexShard shard) throws IOException {
    List<Long> seqNos = LongStream.range(0, 100).boxed().collect(Collectors.toList());
    Randomness.shuffle(seqNos);
    for (long seqNo : seqNos) {
        shard.applyIndexOperationOnReplica(seqNo, 1, shard.getOperationPrimaryTerm(), Translog.UNSET_AUTO_GENERATED_TIMESTAMP, false,
            new SourceToParse(shard.shardId().getIndexName(), UUIDs.randomBase64UUID(), new BytesArray("{}"), XContentType.JSON));
        if (randomInt(100) < 5) {
            shard.flush(new FlushRequest().waitIfOngoing(true));
        }
    }
    shard.sync();
    long globalCheckpoint = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, shard.getLocalCheckpoint());
    shard.updateGlobalCheckpointOnReplica(globalCheckpoint, "test");
    shard.sync();
    return shard.seqNoStats();
}
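Two details are worth calling out: operations are applied in a shuffled sequence-number order (replicas must tolerate out-of-order replay), and a flush is interleaved roughly 5% of the time so recovery starts from a mix of committed and uncommitted state. A minimal sketch of that sampling, using plain java.util.Random in place of the test framework's randomInt helper:

import java.util.Random;

public class PeriodicFlushSampling {
    public static void main(String[] args) {
        Random random = new Random();
        int flushes = 0;
        for (int op = 0; op < 100; op++) {
            // apply one replica operation here, then occasionally force a flush
            if (random.nextInt(100) < 5) { // ~5% of operations trigger a flush
                flushes++; // placeholder for shard.flush(new FlushRequest().waitIfOngoing(true))
            }
        }
        System.out.println("flushed " + flushes + " times across 100 ops");
    }
}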
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.flush.FlushRequest in project crate by crate.
In the class PeerRecoveryTargetServiceTests, the method testWriteFileChunksConcurrently:
@Test
public void testWriteFileChunksConcurrently() throws Exception {
    IndexShard sourceShard = newStartedShard(true);
    int numDocs = between(20, 100);
    for (int i = 0; i < numDocs; i++) {
        indexDoc(sourceShard, "_doc", Integer.toString(i));
    }
    sourceShard.flush(new FlushRequest());
    Store.MetadataSnapshot sourceSnapshot = sourceShard.store().getMetadata(null);
    List<StoreFileMetadata> mdFiles = new ArrayList<>();
    for (StoreFileMetadata md : sourceSnapshot) {
        mdFiles.add(md);
    }
    final IndexShard targetShard = newShard(false);
    final DiscoveryNode pNode = getFakeDiscoNode(sourceShard.routingEntry().currentNodeId());
    final DiscoveryNode rNode = getFakeDiscoNode(targetShard.routingEntry().currentNodeId());
    targetShard.markAsRecovering("test-peer-recovery", new RecoveryState(targetShard.routingEntry(), rNode, pNode));
    final RecoveryTarget recoveryTarget = new RecoveryTarget(targetShard, null, null);
    final PlainActionFuture<Void> receiveFileInfoFuture = new PlainActionFuture<>();
    recoveryTarget.receiveFileInfo(
        mdFiles.stream().map(StoreFileMetadata::name).collect(Collectors.toList()),
        mdFiles.stream().map(StoreFileMetadata::length).collect(Collectors.toList()),
        Collections.emptyList(), Collections.emptyList(), 0, receiveFileInfoFuture);
    receiveFileInfoFuture.actionGet(5, TimeUnit.SECONDS);
    List<RecoveryFileChunkRequest> requests = new ArrayList<>();
    for (StoreFileMetadata md : mdFiles) {
        try (IndexInput in = sourceShard.store().directory().openInput(md.name(), IOContext.READONCE)) {
            int pos = 0;
            while (pos < md.length()) {
                int length = between(1, Math.toIntExact(md.length() - pos));
                byte[] buffer = new byte[length];
                in.readBytes(buffer, 0, length);
                requests.add(new RecoveryFileChunkRequest(0, sourceShard.shardId(), md, pos, new BytesArray(buffer), pos + length == md.length(), 1, 1));
                pos += length;
            }
        }
    }
    Randomness.shuffle(requests);
    BlockingQueue<RecoveryFileChunkRequest> queue = new ArrayBlockingQueue<>(requests.size());
    queue.addAll(requests);
    Thread[] senders = new Thread[between(1, 4)];
    CyclicBarrier barrier = new CyclicBarrier(senders.length);
    for (int i = 0; i < senders.length; i++) {
        senders[i] = new Thread(() -> {
            try {
                barrier.await();
                RecoveryFileChunkRequest r;
                while ((r = queue.poll()) != null) {
                    recoveryTarget.writeFileChunk(r.metadata(), r.position(), r.content(), r.lastChunk(), r.totalTranslogOps(),
                        ActionListener.wrap(ignored -> {
                        }, e -> {
                            throw new AssertionError(e);
                        }));
                }
            } catch (Exception e) {
                throw new AssertionError(e);
            }
        });
        senders[i].start();
    }
    for (Thread sender : senders) {
        sender.join();
    }
    PlainActionFuture<Void> cleanFilesFuture = new PlainActionFuture<>();
    recoveryTarget.cleanFiles(0, Long.parseLong(sourceSnapshot.getCommitUserData().get(SequenceNumbers.MAX_SEQ_NO)), sourceSnapshot, cleanFilesFuture);
    cleanFilesFuture.actionGet();
    recoveryTarget.decRef();
    Store.MetadataSnapshot targetSnapshot = targetShard.snapshotStoreMetadata();
    Store.RecoveryDiff diff = sourceSnapshot.recoveryDiff(targetSnapshot);
    assertThat(diff.different, empty());
    closeShards(sourceShard, targetShard);
}
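The core of the test is the chunking loop: each file is cut into randomly sized pieces, the final piece is flagged via pos + length == md.length(), and the shuffled pieces are written concurrently so the recovery target must reassemble them by position. A self-contained sketch of that chunking; the Chunk record is hypothetical, where the real test wraps each piece in a RecoveryFileChunkRequest.

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

public class ChunkSketch {
    // Hypothetical stand-in for RecoveryFileChunkRequest: offset, payload, end-of-file flag.
    record Chunk(int position, byte[] data, boolean lastChunk) {}

    public static void main(String[] args) {
        byte[] blob = new byte[1024]; // stands in for one store file
        Random random = new Random();
        List<Chunk> chunks = new ArrayList<>();
        int pos = 0;
        while (pos < blob.length) {
            // random chunk length in [1, remaining], mirroring between(1, md.length() - pos)
            int length = 1 + random.nextInt(blob.length - pos);
            byte[] buffer = new byte[length];
            System.arraycopy(blob, pos, buffer, 0, length);
            chunks.add(new Chunk(pos, buffer, pos + length == blob.length));
            pos += length;
        }
        System.out.println("file split into " + chunks.size() + " chunks");
    }
}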