Search in sources :

Example 1 with FlushRequest

use of org.elasticsearch.action.admin.indices.flush.FlushRequest in project elasticsearch by elastic.

the class BroadcastReplicationTests method assertImmediateResponse.

public FlushResponse assertImmediateResponse(String index, TransportFlushAction flushAction) throws InterruptedException, ExecutionException {
    Date beginDate = new Date();
    FlushResponse flushResponse = flushAction.execute(new FlushRequest(index)).get();
    Date endDate = new Date();
    long maxTime = 500;
    assertThat("this should not take longer than " + maxTime + " ms. The request hangs somewhere", endDate.getTime() - beginDate.getTime(), lessThanOrEqualTo(maxTime));
    return flushResponse;
}
Also used : FlushResponse(org.elasticsearch.action.admin.indices.flush.FlushResponse) FlushRequest(org.elasticsearch.action.admin.indices.flush.FlushRequest) Date(java.util.Date)
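
The test above drives TransportFlushAction directly because it is exercising the transport layer. In application code the same request normally goes through the indices admin client; the following is a minimal sketch assuming a connected Client named client and an index called my_index, both of which are illustrative rather than part of the example above.

FlushRequest flushRequest = new FlushRequest("my_index")
    // do not force a commit when nothing has changed since the last flush
    .force(false)
    // block until any flush already in progress has finished
    .waitIfOngoing(true);
FlushResponse flushResponse = client.admin().indices().flush(flushRequest).actionGet();

The builder-style call client.admin().indices().prepareFlush("my_index").get() is an equivalent way to issue the same request.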

Example 2 with FlushRequest

use of org.elasticsearch.action.admin.indices.flush.FlushRequest in project elasticsearch by elastic.

the class PeerRecoveryTargetServiceTests method testGetStartingSeqNo.

public void testGetStartingSeqNo() throws Exception {
    IndexShard replica = newShard(false);
    RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null);
    try {
        recoveryEmptyReplica(replica);
        int docs = randomIntBetween(1, 10);
        final String index = replica.shardId().getIndexName();
        long seqNo = 0;
        for (int i = 0; i < docs; i++) {
            Engine.Index indexOp = replica.prepareIndexOnReplica(SourceToParse.source(SourceToParse.Origin.REPLICA, index, "type", "doc_" + i, new BytesArray("{}"), XContentType.JSON), seqNo++, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
            replica.index(indexOp);
            if (rarely()) {
                // insert a gap
                seqNo++;
            }
        }
        final long maxSeqNo = replica.seqNoStats().getMaxSeqNo();
        final long localCheckpoint = replica.getLocalCheckpoint();
        assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO));
        replica.updateGlobalCheckpointOnReplica(maxSeqNo - 1);
        replica.getTranslog().sync();
        // commit is enough: the max seq no it contains (NO_OPS_PERFORMED) does not exceed the global checkpoint
        assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(0L));
        replica.flush(new FlushRequest());
        // commit is still not good enough, global checkpoint is below max
        assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO));
        replica.updateGlobalCheckpointOnReplica(maxSeqNo);
        replica.getTranslog().sync();
        // commit is enough: the global checkpoint has caught up to the max seq no in the commit
        assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(localCheckpoint + 1));
    } finally {
        closeShards(replica);
        recoveryTarget.decRef();
    }
}
Also used : BytesArray(org.elasticsearch.common.bytes.BytesArray) FlushRequest(org.elasticsearch.action.admin.indices.flush.FlushRequest) IndexShard(org.elasticsearch.index.shard.IndexShard) Engine(org.elasticsearch.index.engine.Engine)
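
The three assertions hinge on the rule that a Lucene commit can seed a sequence-number based recovery only when every operation it contains is already covered by the persisted global checkpoint; flushing therefore hurts until the global checkpoint catches up. A hedged sketch of that decision rule, with illustrative names and a stand-in constant rather than the actual implementation:

// sketch of the rule the assertions above exercise; names and constant are stand-ins
static long startingSeqNo(long maxSeqNoInCommit, long localCheckpointInCommit, long globalCheckpoint) {
    final long UNASSIGNED_SEQ_NO = -2; // stand-in for SequenceNumbersService.UNASSIGNED_SEQ_NO
    if (maxSeqNoInCommit <= globalCheckpoint) {
        // the commit holds nothing beyond the global checkpoint, so resume right after it
        return localCheckpointInCommit + 1;
    }
    return UNASSIGNED_SEQ_NO; // fall back to a file-based recovery
}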

Example 3 with FlushRequest

use of org.elasticsearch.action.admin.indices.flush.FlushRequest in project elasticsearch by elastic.

the class SyncedFlushService method performPreSyncedFlush.

private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) {
    IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
    FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true);
    logger.trace("{} performing pre sync flush", request.shardId());
    Engine.CommitId commitId = indexShard.flush(flushRequest);
    logger.trace("{} pre sync flush done. commit id {}", request.shardId(), commitId);
    return new PreSyncedFlushResponse(commitId);
}
Also used : FlushRequest(org.elasticsearch.action.admin.indices.flush.FlushRequest) IndexShard(org.elasticsearch.index.shard.IndexShard) Engine(org.elasticsearch.index.engine.Engine)
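
The request here is built with force(false), so the engine may skip the commit when nothing has changed since the last one, and waitIfOngoing(true), so a flush that is already running is waited for rather than skipped. For contrast, a minimal sketch of the opposite configuration at the same shard-level API; indexShard stands in for any IndexShard reference and is purely illustrative:

// force a new commit even if no operations arrived since the last flush,
// and do not block on a concurrently running flush
FlushRequest forcedFlush = new FlushRequest().force(true).waitIfOngoing(false);
Engine.CommitId commitId = indexShard.flush(forcedFlush);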

Example 4 with FlushRequest

use of org.elasticsearch.action.admin.indices.flush.FlushRequest in project elasticsearch by elastic.

the class IndexShard method maybeFlush.

/**
     * Schedules a flush if needed but won't schedule more than one flush concurrently. The flush will be executed on the
     * Flush thread-pool asynchronously.
     *
     * @return <code>true</code> if a new flush is scheduled otherwise <code>false</code>.
     */
public boolean maybeFlush() {
    if (shouldFlush()) {
        if (asyncFlushRunning.compareAndSet(false, true)) {
            // we can't use a lock here since we "release" in a different thread
            if (shouldFlush() == false) {
                // we have to check again since otherwise there is a race when a thread passes
                // the first shouldFlush() check next to another thread which flushes fast enough
                // to finish before the current thread could flip the asyncFlushRunning flag.
                // in that situation we have an extra unexpected flush.
                asyncFlushRunning.compareAndSet(true, false);
            } else {
                logger.debug("submitting async flush request");
                final AbstractRunnable abstractRunnable = new AbstractRunnable() {

                    @Override
                    public void onFailure(Exception e) {
                        if (state != IndexShardState.CLOSED) {
                            logger.warn("failed to flush index", e);
                        }
                    }

                    @Override
                    protected void doRun() throws Exception {
                        flush(new FlushRequest());
                    }

                    @Override
                    public void onAfter() {
                        asyncFlushRunning.compareAndSet(true, false);
                        // fire a flush up again if we have filled up the limits such that shouldFlush() returns true
                        maybeFlush();
                    }
                };
                threadPool.executor(ThreadPool.Names.FLUSH).execute(abstractRunnable);
                return true;
            }
        }
    }
    return false;
}
Also used : AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) FlushRequest(org.elasticsearch.action.admin.indices.flush.FlushRequest) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) IndexNotFoundException(org.elasticsearch.index.IndexNotFoundException) ClosedByInterruptException(java.nio.channels.ClosedByInterruptException) ThreadInterruptedException(org.apache.lucene.util.ThreadInterruptedException) RecoveryFailedException(org.elasticsearch.indices.recovery.RecoveryFailedException) EngineException(org.elasticsearch.index.engine.EngineException) IOException(java.io.IOException) ElasticsearchException(org.elasticsearch.ElasticsearchException) NoSuchFileException(java.nio.file.NoSuchFileException) TimeoutException(java.util.concurrent.TimeoutException) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) RefreshFailedEngineException(org.elasticsearch.index.engine.RefreshFailedEngineException) FileNotFoundException(java.io.FileNotFoundException) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException)
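
The compareAndSet pair above keeps at most one asynchronous flush in flight without holding a lock across threads: the flag is flipped before submitting, the shouldFlush() condition is re-checked to close the race described in the comment, and the flag is released in onAfter on the flush thread. The same gate, reduced to a self-contained sketch; the class and method names are illustrative and not part of Elasticsearch:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BooleanSupplier;

class SingleFlightTask {

    private final AtomicBoolean running = new AtomicBoolean(false);
    private final ExecutorService executor = Executors.newSingleThreadExecutor();

    boolean maybeRun(Runnable work, BooleanSupplier shouldRun) {
        if (shouldRun.getAsBoolean() && running.compareAndSet(false, true)) {
            if (shouldRun.getAsBoolean() == false) {
                // another thread finished the work between our first check and the flag flip,
                // so release the flag instead of scheduling a redundant run
                running.compareAndSet(true, false);
            } else {
                executor.execute(() -> {
                    try {
                        work.run();
                    } finally {
                        // release on the worker thread, then re-evaluate in case more work
                        // accumulated while this run was in progress
                        running.compareAndSet(true, false);
                        maybeRun(work, shouldRun);
                    }
                });
                return true;
            }
        }
        return false;
    }
}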

Example 5 with FlushRequest

use of org.elasticsearch.action.admin.indices.flush.FlushRequest in project elasticsearch by elastic.

the class RecoveryDuringReplicationTests method testRecoveryAfterPrimaryPromotion.

@TestLogging("org.elasticsearch.index.shard:TRACE,org.elasticsearch.indices.recovery:TRACE")
public void testRecoveryAfterPrimaryPromotion() throws Exception {
    try (ReplicationGroup shards = createGroup(2)) {
        shards.startAll();
        int totalDocs = shards.indexDocs(randomInt(10));
        int committedDocs = 0;
        if (randomBoolean()) {
            shards.flush();
            committedDocs = totalDocs;
        }
        // we need some indexing to happen to transfer local checkpoint information to the primary
        // so it can update the global checkpoint and communicate to replicas
        boolean expectSeqNoRecovery = totalDocs > 0;
        final IndexShard oldPrimary = shards.getPrimary();
        final IndexShard newPrimary = shards.getReplicas().get(0);
        final IndexShard replica = shards.getReplicas().get(1);
        if (randomBoolean()) {
            // simulate docs that were inflight when primary failed, these will be rolled back
            final int rollbackDocs = randomIntBetween(1, 5);
            logger.info("--> indexing {} rollback docs", rollbackDocs);
            for (int i = 0; i < rollbackDocs; i++) {
                final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "rollback_" + i).source("{}", XContentType.JSON);
                final IndexResponse primaryResponse = indexOnPrimary(indexRequest, oldPrimary);
                indexOnReplica(primaryResponse, indexRequest, replica);
            }
            if (randomBoolean()) {
                oldPrimary.flush(new FlushRequest(index.getName()));
                expectSeqNoRecovery = false;
            }
        }
        shards.promoteReplicaToPrimary(newPrimary);
        // index some more
        totalDocs += shards.indexDocs(randomIntBetween(0, 5));
        oldPrimary.close("demoted", false);
        oldPrimary.store().close();
        IndexShard newReplica = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId());
        shards.recoverReplica(newReplica);
        if (expectSeqNoRecovery) {
            assertThat(newReplica.recoveryState().getIndex().fileDetails(), empty());
            assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(totalDocs - committedDocs));
        } else {
            assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty()));
            assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(totalDocs - committedDocs));
        }
        shards.removeReplica(replica);
        replica.close("resync", false);
        replica.store().close();
        newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId());
        shards.recoverReplica(newReplica);
        shards.assertAllEqual(totalDocs);
    }
}
Also used : IndexResponse(org.elasticsearch.action.index.IndexResponse) FlushRequest(org.elasticsearch.action.admin.indices.flush.FlushRequest) IndexShard(org.elasticsearch.index.shard.IndexShard) IndexRequest(org.elasticsearch.action.index.IndexRequest) TestLogging(org.elasticsearch.test.junit.annotations.TestLogging)

Aggregations

FlushRequest (org.elasticsearch.action.admin.indices.flush.FlushRequest): 22 usages
IOException (java.io.IOException): 7 usages
Engine (org.elasticsearch.index.engine.Engine): 6 usages
IndexShard (org.elasticsearch.index.shard.IndexShard): 5 usages
ClosedByInterruptException (java.nio.channels.ClosedByInterruptException): 3 usages
ArrayList (java.util.ArrayList): 3 usages
TimeoutException (java.util.concurrent.TimeoutException): 3 usages
ElasticsearchException (org.elasticsearch.ElasticsearchException): 3 usages
ForceMergeRequest (org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest): 3 usages
BytesArray (org.elasticsearch.common.bytes.BytesArray): 3 usages
Settings (org.elasticsearch.common.settings.Settings): 3 usages
AbstractRunnable (org.elasticsearch.common.util.concurrent.AbstractRunnable): 3 usages
IndexNotFoundException (org.elasticsearch.index.IndexNotFoundException): 3 usages
Term (org.apache.lucene.index.Term): 2 usages
AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException): 2 usages
ThreadInterruptedException (org.apache.lucene.util.ThreadInterruptedException): 2 usages
UpgradeRequest (org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest): 2 usages
EngineException (org.elasticsearch.index.engine.EngineException): 2 usages
RefreshFailedEngineException (org.elasticsearch.index.engine.RefreshFailedEngineException): 2 usages
ParsedDocument (org.elasticsearch.index.mapper.ParsedDocument): 2 usages