
Example 1 with BulkShardRequest

Use of org.opensearch.action.bulk.BulkShardRequest in project OpenSearch by opensearch-project.

From class IndexingPressureServiceTests, method testCoordinatingOperationForShardIndexingPressure.

public void testCoordinatingOperationForShardIndexingPressure() {
    IndexingPressureService service = new IndexingPressureService(settings, clusterService);
    Index index = new Index("IndexName", "UUID");
    ShardId shardId = new ShardId(index, 0);
    BulkItemRequest[] items = new BulkItemRequest[1];
    DocWriteRequest<IndexRequest> writeRequest = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar");
    items[0] = new BulkItemRequest(0, writeRequest);
    BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, WriteRequest.RefreshPolicy.NONE, items);
    Releasable releasable = service.markCoordinatingOperationStarted(shardId, bulkShardRequest::ramBytesUsed, false);
    IndexingPressurePerShardStats shardStats = service.shardStats(CommonStatsFlags.ALL).getIndexingPressureShardStats(shardId);
    assertEquals(bulkShardRequest.ramBytesUsed(), shardStats.getCurrentCoordinatingBytes());
    releasable.close();
}
Also used : ShardId(org.opensearch.index.shard.ShardId) BulkShardRequest(org.opensearch.action.bulk.BulkShardRequest) BulkItemRequest(org.opensearch.action.bulk.BulkItemRequest) Releasable(org.opensearch.common.lease.Releasable) IndexRequest(org.opensearch.action.index.IndexRequest) IndexingPressurePerShardStats(org.opensearch.index.stats.IndexingPressurePerShardStats)
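
The Releasable returned by markCoordinatingOperationStarted is what hands back the coordinating bytes tracked for the shard. As a minimal sketch, assuming the same service, shardId, and bulkShardRequest set up in the test above (the try-with-resources form is an illustration, not taken from the test itself):

// Sketch only: scoping the Releasable with try-with-resources ensures the coordinating
// bytes tracked for this shard are released even if dispatching the request fails.
try (Releasable ignored = service.markCoordinatingOperationStarted(shardId, bulkShardRequest::ramBytesUsed, false)) {
    // forward bulkShardRequest to the primary shard here
}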

Example 2 with BulkShardRequest

Use of org.opensearch.action.bulk.BulkShardRequest in project OpenSearch by opensearch-project.

From class IndexLevelReplicationTests, method testReplicaOperationWithConcurrentPrimaryPromotion.

public void testReplicaOperationWithConcurrentPrimaryPromotion() throws Exception {
    Map<String, String> mappings = Collections.singletonMap("type", "{ \"type\": { \"properties\": { \"f\": { \"type\": \"keyword\"} }}}");
    try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetadata(1, mappings))) {
        shards.startAll();
        long primaryPrimaryTerm = shards.getPrimary().getPendingPrimaryTerm();
        IndexRequest indexRequest = new IndexRequest(index.getName()).id("1").source("{ \"f\": \"1\"}", XContentType.JSON);
        BulkShardRequest replicationRequest = indexOnPrimary(indexRequest, shards.getPrimary());
        List<IndexShard> replicas = shards.getReplicas();
        IndexShard replica = replicas.get(0);
        CyclicBarrier barrier = new CyclicBarrier(2);
        AtomicBoolean successFullyIndexed = new AtomicBoolean();
        Thread t1 = new Thread(() -> {
            try {
                barrier.await();
                indexOnReplica(replicationRequest, shards, replica, primaryPrimaryTerm);
                successFullyIndexed.set(true);
            } catch (IllegalStateException ise) {
                assertThat(ise.getMessage(), either(containsString("is too old")).or(containsString("cannot be a replication target")).or(containsString("engine is closed")));
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        Thread t2 = new Thread(() -> {
            try {
                barrier.await();
                shards.promoteReplicaToPrimary(replica).get();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        t2.start();
        t1.start();
        t1.join();
        t2.join();
        assertEquals(primaryPrimaryTerm + 1, replica.getPendingPrimaryTerm());
        if (successFullyIndexed.get()) {
            try (Translog.Snapshot snapshot = getTranslog(replica).newSnapshot()) {
                assertThat(snapshot.totalOperations(), equalTo(1));
                Translog.Operation op = snapshot.next();
                assertThat(op.primaryTerm(), equalTo(primaryPrimaryTerm));
            }
        }
    }
}
Also used : BulkShardRequest(org.opensearch.action.bulk.BulkShardRequest) IndexShard(org.opensearch.index.shard.IndexShard) Matchers.containsString(org.hamcrest.Matchers.containsString) IndexRequest(org.opensearch.action.index.IndexRequest) VersionConflictEngineException(org.opensearch.index.engine.VersionConflictEngineException) IOException(java.io.IOException) CyclicBarrier(java.util.concurrent.CyclicBarrier) Translog(org.opensearch.index.translog.Translog) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean)
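
Examples 2 and 3 share the same concurrency skeleton: two threads rendezvous on a CyclicBarrier so that the replica-side indexing and the primary promotion start as close to simultaneously as possible, and the assertions afterwards accept whichever interleaving actually happened. Below is a distilled sketch of that skeleton using only the JDK; the raceAtBarrier helper is illustrative and not part of the OpenSearch test classes.

import java.util.concurrent.CyclicBarrier;

// Illustrative helper, not part of the OpenSearch tests: run two actions that both
// wait at a barrier so they begin at (nearly) the same instant, then join both threads.
static void raceAtBarrier(Runnable first, Runnable second) throws InterruptedException {
    CyclicBarrier barrier = new CyclicBarrier(2);
    Thread t1 = new Thread(() -> {
        try {
            barrier.await();
            first.run();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    });
    Thread t2 = new Thread(() -> {
        try {
            barrier.await();
            second.run();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    });
    t2.start();
    t1.start();
    t1.join();
    t2.join();
}

In the tests, first would wrap the indexOnReplica(...) call and second the shards.promoteReplicaToPrimary(...).get() call.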

Example 3 with BulkShardRequest

Use of org.opensearch.action.bulk.BulkShardRequest in project OpenSearch by opensearch-project.

From class IndexLevelReplicationTests, method testReplicaTermIncrementWithConcurrentPrimaryPromotion.

public void testReplicaTermIncrementWithConcurrentPrimaryPromotion() throws Exception {
    Map<String, String> mappings = Collections.singletonMap("type", "{ \"type\": { \"properties\": { \"f\": { \"type\": \"keyword\"} }}}");
    try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetadata(2, mappings))) {
        shards.startAll();
        long primaryPrimaryTerm = shards.getPrimary().getPendingPrimaryTerm();
        List<IndexShard> replicas = shards.getReplicas();
        IndexShard replica1 = replicas.get(0);
        IndexShard replica2 = replicas.get(1);
        shards.promoteReplicaToPrimary(replica1, (shard, listener) -> {
        });
        long newReplica1Term = replica1.getPendingPrimaryTerm();
        assertEquals(primaryPrimaryTerm + 1, newReplica1Term);
        assertEquals(primaryPrimaryTerm, replica2.getPendingPrimaryTerm());
        IndexRequest indexRequest = new IndexRequest(index.getName()).id("1").source("{ \"f\": \"1\"}", XContentType.JSON);
        BulkShardRequest replicationRequest = indexOnPrimary(indexRequest, replica1);
        CyclicBarrier barrier = new CyclicBarrier(2);
        Thread t1 = new Thread(() -> {
            try {
                barrier.await();
                indexOnReplica(replicationRequest, shards, replica2, newReplica1Term);
            } catch (IllegalStateException ise) {
                assertThat(ise.getMessage(), either(containsString("is too old")).or(containsString("cannot be a replication target")).or(containsString("engine is closed")));
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        Thread t2 = new Thread(() -> {
            try {
                barrier.await();
                shards.promoteReplicaToPrimary(replica2).get();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        t2.start();
        t1.start();
        t1.join();
        t2.join();
        assertEquals(newReplica1Term + 1, replica2.getPendingPrimaryTerm());
    }
}
Also used : BulkShardRequest(org.opensearch.action.bulk.BulkShardRequest) IndexShard(org.opensearch.index.shard.IndexShard) Matchers.containsString(org.hamcrest.Matchers.containsString) IndexRequest(org.opensearch.action.index.IndexRequest) VersionConflictEngineException(org.opensearch.index.engine.VersionConflictEngineException) IOException(java.io.IOException) CyclicBarrier(java.util.concurrent.CyclicBarrier)

Example 4 with BulkShardRequest

Use of org.opensearch.action.bulk.BulkShardRequest in project OpenSearch by opensearch-project.

From class RecoveryDuringReplicationTests, method testResyncAfterPrimaryPromotion.

public void testResyncAfterPrimaryPromotion() throws Exception {
    Map<String, String> mappings = Collections.singletonMap("type", "{ \"type\": { \"properties\": { \"f\": { \"type\": \"keyword\"} }}}");
    try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetadata(2, mappings))) {
        shards.startAll();
        int initialDocs = randomInt(10);
        for (int i = 0; i < initialDocs; i++) {
            final IndexRequest indexRequest = new IndexRequest(index.getName()).id("initial_doc_" + i).source("{ \"f\": \"normal\"}", XContentType.JSON);
            shards.index(indexRequest);
        }
        boolean syncedGlobalCheckPoint = randomBoolean();
        if (syncedGlobalCheckPoint) {
            shards.syncGlobalCheckpoint();
        }
        final IndexShard oldPrimary = shards.getPrimary();
        final IndexShard newPrimary = shards.getReplicas().get(0);
        final IndexShard justReplica = shards.getReplicas().get(1);
        // simulate docs that were in flight when the primary failed
        final int extraDocs = randomInt(5);
        logger.info("--> indexing {} extra docs", extraDocs);
        for (int i = 0; i < extraDocs; i++) {
            final IndexRequest indexRequest = new IndexRequest(index.getName()).id("extra_doc_" + i).source("{ \"f\": \"normal\"}", XContentType.JSON);
            final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary);
            indexOnReplica(bulkShardRequest, shards, newPrimary);
        }
        final int extraDocsToBeTrimmed = randomIntBetween(0, 10);
        logger.info("--> indexing {} extra docs to be trimmed", extraDocsToBeTrimmed);
        for (int i = 0; i < extraDocsToBeTrimmed; i++) {
            final IndexRequest indexRequest = new IndexRequest(index.getName()).id("extra_trimmed_" + i).source("{ \"f\": \"trimmed\"}", XContentType.JSON);
            final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary);
            // replicate to a replica other than newPrimary; these are the ops that will be trimmed
            indexOnReplica(bulkShardRequest, shards, justReplica);
        }
        logger.info("--> resyncing replicas seqno_stats primary {} replica {}", oldPrimary.seqNoStats(), newPrimary.seqNoStats());
        PrimaryReplicaSyncer.ResyncTask task = shards.promoteReplicaToPrimary(newPrimary).get();
        if (syncedGlobalCheckPoint) {
            assertEquals(extraDocs, task.getResyncedOperations());
        } else {
            assertThat(task.getResyncedOperations(), greaterThanOrEqualTo(extraDocs));
        }
        shards.assertAllEqual(initialDocs + extraDocs);
        for (IndexShard replica : shards.getReplicas()) {
            assertThat(replica.getMaxSeqNoOfUpdatesOrDeletes(), greaterThanOrEqualTo(shards.getPrimary().getMaxSeqNoOfUpdatesOrDeletes()));
        }
        // check translog on replica is trimmed
        int translogOperations = 0;
        try (Translog.Snapshot snapshot = getTranslog(justReplica).newSnapshot()) {
            Translog.Operation next;
            while ((next = snapshot.next()) != null) {
                translogOperations++;
                assertThat("unexpected op: " + next, (int) next.seqNo(), lessThan(initialDocs + extraDocs));
                assertThat("unexpected primaryTerm: " + next.primaryTerm(), next.primaryTerm(), is(oldPrimary.getPendingPrimaryTerm()));
                final Translog.Source source = next.getSource();
                assertThat(source.source.utf8ToString(), is("{ \"f\": \"normal\"}"));
            }
        }
        assertThat(translogOperations, either(equalTo(initialDocs + extraDocs)).or(equalTo(task.getResyncedOperations())));
    }
}
Also used : BulkShardRequest(org.opensearch.action.bulk.BulkShardRequest) IndexShard(org.opensearch.index.shard.IndexShard) IndexRequest(org.opensearch.action.index.IndexRequest) Translog(org.opensearch.index.translog.Translog) PrimaryReplicaSyncer(org.opensearch.index.shard.PrimaryReplicaSyncer)

Example 5 with BulkShardRequest

Use of org.opensearch.action.bulk.BulkShardRequest in project OpenSearch by opensearch-project.

From class RecoveryDuringReplicationTests, method testRollbackOnPromotion.

public void testRollbackOnPromotion() throws Exception {
    try (ReplicationGroup shards = createGroup(between(2, 3))) {
        shards.startAll();
        IndexShard newPrimary = randomFrom(shards.getReplicas());
        int initDocs = shards.indexDocs(randomInt(100));
        int inFlightOpsOnNewPrimary = 0;
        int inFlightOps = scaledRandomIntBetween(10, 200);
        for (int i = 0; i < inFlightOps; i++) {
            String id = "extra-" + i;
            IndexRequest primaryRequest = new IndexRequest(index.getName()).id(id).source("{}", XContentType.JSON);
            BulkShardRequest replicationRequest = indexOnPrimary(primaryRequest, shards.getPrimary());
            for (IndexShard replica : shards.getReplicas()) {
                if (randomBoolean()) {
                    indexOnReplica(replicationRequest, shards, replica);
                    if (replica == newPrimary) {
                        inFlightOpsOnNewPrimary++;
                    }
                }
            }
            if (randomBoolean()) {
                shards.syncGlobalCheckpoint();
            }
            if (rarely()) {
                shards.flush();
            }
        }
        shards.refresh("test");
        List<DocIdSeqNoAndSource> docsBelowGlobalCheckpoint = EngineTestCase.getDocIds(getEngine(newPrimary), randomBoolean()).stream().filter(doc -> doc.getSeqNo() <= newPrimary.getLastKnownGlobalCheckpoint()).collect(Collectors.toList());
        CountDownLatch latch = new CountDownLatch(1);
        final AtomicBoolean done = new AtomicBoolean();
        Thread thread = new Thread(() -> {
            List<IndexShard> replicas = new ArrayList<>(shards.getReplicas());
            replicas.remove(newPrimary);
            latch.countDown();
            while (done.get() == false) {
                try {
                    List<DocIdSeqNoAndSource> exposedDocs = EngineTestCase.getDocIds(getEngine(randomFrom(replicas)), randomBoolean());
                    assertThat(docsBelowGlobalCheckpoint, everyItem(is(in(exposedDocs))));
                    assertThat(randomFrom(replicas).getLocalCheckpoint(), greaterThanOrEqualTo(initDocs - 1L));
                } catch (AlreadyClosedException ignored) {
                // replica swaps engine during rollback
                } catch (Exception e) {
                    throw new AssertionError(e);
                }
            }
        });
        thread.start();
        latch.await();
        shards.promoteReplicaToPrimary(newPrimary).get();
        shards.assertAllEqual(initDocs + inFlightOpsOnNewPrimary);
        int moreDocsAfterRollback = shards.indexDocs(scaledRandomIntBetween(1, 20));
        shards.assertAllEqual(initDocs + inFlightOpsOnNewPrimary + moreDocsAfterRollback);
        done.set(true);
        thread.join();
        shards.syncGlobalCheckpoint();
        for (IndexShard shard : shards) {
            shard.flush(new FlushRequest().force(true).waitIfOngoing(true));
            assertThat(shard.translogStats().getUncommittedOperations(), equalTo(0));
        }
    }
}
Also used : BulkShardRequest(org.opensearch.action.bulk.BulkShardRequest) SequenceNumbers(org.opensearch.index.seqno.SequenceNumbers) Matchers.either(org.hamcrest.Matchers.either) IndexableField(org.apache.lucene.index.IndexableField) Matchers.not(org.hamcrest.Matchers.not) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) Version(org.opensearch.Version) ForceMergeRequest(org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest) PrimaryReplicaSyncer(org.opensearch.index.shard.PrimaryReplicaSyncer) DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) IndexShardTestCase(org.opensearch.index.shard.IndexShardTestCase) Future(java.util.concurrent.Future) Matchers.everyItem(org.hamcrest.Matchers.everyItem) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) RecoveryState(org.opensearch.indices.recovery.RecoveryState) Map(java.util.Map) ActionListener(org.opensearch.action.ActionListener) EnumSet(java.util.EnumSet) DeleteRequest(org.opensearch.action.delete.DeleteRequest) EngineTestCase(org.opensearch.index.engine.EngineTestCase) Settings(org.opensearch.common.settings.Settings) Store(org.opensearch.index.store.Store) Collectors(java.util.stream.Collectors) Engine(org.opensearch.index.engine.Engine) CountDownLatch(java.util.concurrent.CountDownLatch) IndexWriter(org.apache.lucene.index.IndexWriter) VersionType(org.opensearch.index.VersionType) List(java.util.List) Logger(org.apache.logging.log4j.Logger) BytesArray(org.opensearch.common.bytes.BytesArray) Matchers.equalTo(org.hamcrest.Matchers.equalTo) IndexSettings(org.opensearch.index.IndexSettings) Optional(java.util.Optional) XContentType(org.opensearch.common.xcontent.XContentType) Matchers.greaterThan(org.hamcrest.Matchers.greaterThan) Matchers.is(org.hamcrest.Matchers.is) Matchers.anyOf(org.hamcrest.Matchers.anyOf) Matchers.in(org.hamcrest.Matchers.in) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) FlushRequest(org.opensearch.action.admin.indices.flush.FlushRequest) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Releasable(org.opensearch.common.lease.Releasable) EngineConfig(org.opensearch.index.engine.EngineConfig) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) InternalEngineTests(org.opensearch.index.engine.InternalEngineTests) DocIdSeqNoAndSource(org.opensearch.index.engine.DocIdSeqNoAndSource) SourceToParse(org.opensearch.index.mapper.SourceToParse) IndexShard(org.opensearch.index.shard.IndexShard) PeerRecoveryTargetService(org.opensearch.indices.recovery.PeerRecoveryTargetService) Matchers.lessThan(org.hamcrest.Matchers.lessThan) Translog(org.opensearch.index.translog.Translog) EngineFactory(org.opensearch.index.engine.EngineFactory) RetentionLease(org.opensearch.index.seqno.RetentionLease) InternalEngineFactory(org.opensearch.index.engine.InternalEngineFactory) Versions(org.opensearch.common.lucene.uid.Versions) Matchers.empty(org.hamcrest.Matchers.empty) Matchers.greaterThanOrEqualTo(org.hamcrest.Matchers.greaterThanOrEqualTo) RecoveryTarget(org.opensearch.indices.recovery.RecoveryTarget) IOException(java.io.IOException) ShardRouting(org.opensearch.cluster.routing.ShardRouting) TimeUnit(java.util.concurrent.TimeUnit) RetentionLeases(org.opensearch.index.seqno.RetentionLeases) IndexRequest(org.opensearch.action.index.IndexRequest) Collections(java.util.Collections)
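
Across the examples above, the BulkShardRequest itself is assembled the same way every time: wrap one or more write requests in BulkItemRequest entries and target a concrete ShardId. Below is a minimal standalone sketch of that construction, using the same constructors shown in Example 1 and Example 2; the index name, UUID, and document values are placeholders.

// Sketch of the recurring construction pattern; names and values are placeholders.
Index index = new Index("my-index", "my-index-uuid");
ShardId shardId = new ShardId(index, 0);
IndexRequest indexRequest = new IndexRequest("my-index").id("1").source("{ \"f\": \"1\" }", XContentType.JSON);
BulkItemRequest[] items = new BulkItemRequest[] { new BulkItemRequest(0, indexRequest) };
BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, WriteRequest.RefreshPolicy.NONE, items);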

Aggregations

BulkShardRequest (org.opensearch.action.bulk.BulkShardRequest): 15 uses
IndexRequest (org.opensearch.action.index.IndexRequest): 14 uses
IndexShard (org.opensearch.index.shard.IndexShard): 13 uses
Translog (org.opensearch.index.translog.Translog): 5 uses
IOException (java.io.IOException): 4 uses
ArrayList (java.util.ArrayList): 4 uses
Matchers.containsString (org.hamcrest.Matchers.containsString): 4 uses
DeleteRequest (org.opensearch.action.delete.DeleteRequest): 3 uses
Collections (java.util.Collections): 2 uses
List (java.util.List): 2 uses
Map (java.util.Map): 2 uses
CyclicBarrier (java.util.concurrent.CyclicBarrier): 2 uses
Future (java.util.concurrent.Future): 2 uses
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 2 uses
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2 uses
Collectors (java.util.stream.Collectors): 2 uses
AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException): 2 uses
Matchers.empty (org.hamcrest.Matchers.empty): 2 uses
Matchers.equalTo (org.hamcrest.Matchers.equalTo): 2 uses
Matchers.greaterThanOrEqualTo (org.hamcrest.Matchers.greaterThanOrEqualTo): 2 uses