Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.flush.FlushRequest in project elasticsearch by elastic.
From the class BroadcastReplicationTests, method assertImmediateResponse:
public FlushResponse assertImmediateResponse(String index, TransportFlushAction flushAction) throws InterruptedException, ExecutionException {
    Date beginDate = new Date();
    FlushResponse flushResponse = flushAction.execute(new FlushRequest(index)).get();
    Date endDate = new Date();
    long maxTime = 500;
    assertThat("this should not take longer than " + maxTime + " ms. The request hangs somewhere",
        endDate.getTime() - beginDate.getTime(), lessThanOrEqualTo(maxTime));
    return flushResponse;
}
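The wall-clock arithmetic above works, but Date is not monotonic and can jump under clock adjustments. Below is a minimal sketch of the same timing guard built on System.nanoTime(), which is monotonic; the helper name assertShortlyCompletes and its Callable parameter are illustrative, not part of the Elasticsearch test base classes.

import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

final class TimingAssert {
    // runs the action and fails if it exceeds maxMillis, returning its result otherwise
    static <T> T assertShortlyCompletes(long maxMillis, Callable<T> action) throws Exception {
        final long start = System.nanoTime();
        final T result = action.call();
        final long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        if (elapsedMillis > maxMillis) {
            throw new AssertionError("this should not take longer than " + maxMillis
                + " ms, the request hangs somewhere (took " + elapsedMillis + " ms)");
        }
        return result;
    }
}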
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.flush.FlushRequest in project elasticsearch by elastic.
From the class PeerRecoveryTargetServiceTests, method testGetStartingSeqNo:
public void testGetStartingSeqNo() throws Exception {
    IndexShard replica = newShard(false);
    RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null);
    try {
        recoveryEmptyReplica(replica);
        int docs = randomIntBetween(1, 10);
        final String index = replica.shardId().getIndexName();
        long seqNo = 0;
        for (int i = 0; i < docs; i++) {
            Engine.Index indexOp = replica.prepareIndexOnReplica(
                SourceToParse.source(SourceToParse.Origin.REPLICA, index, "type", "doc_" + i, new BytesArray("{}"), XContentType.JSON),
                seqNo++, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
            replica.index(indexOp);
            if (rarely()) {
                // insert a gap so the local checkpoint falls behind the max seq no
                seqNo++;
            }
        }
        final long maxSeqNo = replica.seqNoStats().getMaxSeqNo();
        final long localCheckpoint = replica.getLocalCheckpoint();

        // no global checkpoint has been synced yet, so ops-based recovery is not possible
        assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO));

        replica.updateGlobalCheckpointOnReplica(maxSeqNo - 1);
        replica.getTranslog().sync();
        // the commit is enough: the global checkpoint is at or above the max seq no of the last (empty) commit, which is NO_OPS_PERFORMED
        assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(0L));

        replica.flush(new FlushRequest());
        // the fresh commit is not good enough: the global checkpoint is below its max seq no
        assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO));

        replica.updateGlobalCheckpointOnReplica(maxSeqNo);
        replica.getTranslog().sync();
        // the commit is enough now that the global checkpoint has caught up with the max seq no
        assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(localCheckpoint + 1));
    } finally {
        closeShards(replica);
        recoveryTarget.decRef();
    }
}
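The gaps inserted above are what keep the local checkpoint behind the max seq no. Here is a toy sketch of that bookkeeping using only the JDK; it is an illustrative model, not Elasticsearch's LocalCheckpointTracker. Marking 0, 1 and 3 as processed leaves the checkpoint at 1 while the max is 3, mirroring the gap in the test.

import java.util.BitSet;

final class ToyCheckpoint {
    private final BitSet processed = new BitSet();
    private long checkpoint = -1; // NO_OPS_PERFORMED
    private long maxSeqNo = -1;

    void markProcessed(int seqNo) {
        processed.set(seqNo);
        maxSeqNo = Math.max(maxSeqNo, seqNo);
        // the checkpoint only advances across a contiguous run of processed seq nos
        while (processed.get((int) (checkpoint + 1))) {
            checkpoint++;
        }
    }

    long checkpoint() { return checkpoint; }
    long maxSeqNo() { return maxSeqNo; }
}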
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.flush.FlushRequest in project elasticsearch by elastic.
From the class SyncedFlushService, method performPreSyncedFlush:
private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) {
    IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
    FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true);
    logger.trace("{} performing pre sync flush", request.shardId());
    Engine.CommitId commitId = indexShard.flush(flushRequest);
    logger.trace("{} pre sync flush done. commit id {}", request.shardId(), commitId);
    return new PreSyncedFlushResponse(commitId);
}
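For comparison, the same non-forced, wait-if-ongoing flush can be issued from the client side. A minimal sketch, assuming a started transport-level Client against the unshaded artifact; the class name and index parameter here are illustrative:

import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.client.Client;

final class PreSyncedStyleFlush {
    // setForce(false): skip the flush when nothing has changed since the last commit;
    // setWaitIfOngoing(true): block behind a concurrent flush instead of failing.
    static FlushResponse flush(Client client, String indexName) {
        return client.admin().indices()
            .prepareFlush(indexName)
            .setForce(false)
            .setWaitIfOngoing(true)
            .get();
    }
}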
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.flush.FlushRequest in project elasticsearch by elastic.
From the class IndexShard, method maybeFlush:
/**
 * Schedules a flush if needed but won't schedule more than one flush concurrently. The flush will be executed on the
 * Flush thread-pool asynchronously.
 *
 * @return <code>true</code> if a new flush is scheduled otherwise <code>false</code>.
 */
public boolean maybeFlush() {
    if (shouldFlush()) {
        if (asyncFlushRunning.compareAndSet(false, true)) {
            // we can't use a lock here since we "release" in a different thread
            if (shouldFlush() == false) {
                // we have to check again since otherwise there is a race when a thread passes
                // the first shouldFlush() check next to another thread which flushes fast enough
                // to finish before the current thread could flip the asyncFlushRunning flag.
                // in that situation we have an extra unexpected flush.
                asyncFlushRunning.compareAndSet(true, false);
            } else {
                logger.debug("submitting async flush request");
                final AbstractRunnable abstractRunnable = new AbstractRunnable() {
                    @Override
                    public void onFailure(Exception e) {
                        if (state != IndexShardState.CLOSED) {
                            logger.warn("failed to flush index", e);
                        }
                    }

                    @Override
                    protected void doRun() throws Exception {
                        flush(new FlushRequest());
                    }

                    @Override
                    public void onAfter() {
                        asyncFlushRunning.compareAndSet(true, false);
                        // fire a flush up again if we have filled up the limits such that shouldFlush() returns true
                        maybeFlush();
                    }
                };
                threadPool.executor(ThreadPool.Names.FLUSH).execute(abstractRunnable);
                return true;
            }
        }
    }
    return false;
}
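The pattern above, an atomic gate acquired with compareAndSet, re-checked after acquisition to close the race the comments describe, and released on completion before re-triggering, generalizes beyond flushing. A self-contained sketch of the same single-flight idea using only the JDK; all names here are illustrative:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BooleanSupplier;

final class SingleFlight {
    private final AtomicBoolean running = new AtomicBoolean(false);
    private final ExecutorService executor = Executors.newSingleThreadExecutor();
    private final BooleanSupplier shouldRun;
    private final Runnable work;

    SingleFlight(BooleanSupplier shouldRun, Runnable work) {
        this.shouldRun = shouldRun;
        this.work = work;
    }

    // returns true if a new run was scheduled
    boolean maybeRun() {
        if (shouldRun.getAsBoolean() && running.compareAndSet(false, true)) {
            // re-check: another thread may have finished the work between the two calls,
            // in which case we release the gate instead of running an extra task
            if (shouldRun.getAsBoolean() == false) {
                running.set(false);
            } else {
                executor.execute(() -> {
                    try {
                        work.run();
                    } finally {
                        running.set(false);
                        maybeRun(); // re-fire if the condition became true again meanwhile
                    }
                });
                return true;
            }
        }
        return false;
    }
}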
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.flush.FlushRequest in project elasticsearch by elastic.
From the class RecoveryDuringReplicationTests, method testRecoveryAfterPrimaryPromotion:
@TestLogging("org.elasticsearch.index.shard:TRACE,org.elasticsearch.indices.recovery:TRACE")
public void testRecoveryAfterPrimaryPromotion() throws Exception {
    try (ReplicationGroup shards = createGroup(2)) {
        shards.startAll();
        int totalDocs = shards.indexDocs(randomInt(10));
        int committedDocs = 0;
        if (randomBoolean()) {
            shards.flush();
            committedDocs = totalDocs;
        }
        // we need some indexing to happen to transfer local checkpoint information to the primary
        // so it can update the global checkpoint and communicate it to the replicas
        boolean expectSeqNoRecovery = totalDocs > 0;
        final IndexShard oldPrimary = shards.getPrimary();
        final IndexShard newPrimary = shards.getReplicas().get(0);
        final IndexShard replica = shards.getReplicas().get(1);
        if (randomBoolean()) {
            // simulate docs that were in flight when the primary failed; these will be rolled back
            final int rollbackDocs = randomIntBetween(1, 5);
            logger.info("--> indexing {} rollback docs", rollbackDocs);
            for (int i = 0; i < rollbackDocs; i++) {
                final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "rollback_" + i)
                    .source("{}", XContentType.JSON);
                final IndexResponse primaryResponse = indexOnPrimary(indexRequest, oldPrimary);
                indexOnReplica(primaryResponse, indexRequest, replica);
            }
            if (randomBoolean()) {
                oldPrimary.flush(new FlushRequest(index.getName()));
                expectSeqNoRecovery = false;
            }
        }
        shards.promoteReplicaToPrimary(newPrimary);
        // index some more
        totalDocs += shards.indexDocs(randomIntBetween(0, 5));

        oldPrimary.close("demoted", false);
        oldPrimary.store().close();

        IndexShard newReplica = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId());
        shards.recoverReplica(newReplica);
        if (expectSeqNoRecovery) {
            // ops-based recovery: no files are copied, only translog operations are replayed
            assertThat(newReplica.recoveryState().getIndex().fileDetails(), empty());
        } else {
            // file-based recovery: segment files are copied over
            assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty()));
        }
        assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(totalDocs - committedDocs));

        shards.removeReplica(replica);
        replica.close("resync", false);
        replica.store().close();
        newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId());
        shards.recoverReplica(newReplica);
        shards.assertAllEqual(totalDocs);
    }
}
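Outside the test harness, the request-object form used above, new FlushRequest(index.getName()), maps directly onto the admin client. A minimal sketch, assuming a started Client against the unshaded artifact; the class name and parameters are illustrative:

import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.client.Client;

final class FlushByRequest {
    static FlushResponse flush(Client client, String indexName) {
        // a FlushRequest constructed with no index names would flush all indices
        return client.admin().indices().flush(new FlushRequest(indexName)).actionGet();
    }
}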