
Example 1 with MockTransportService

Use of org.opensearch.test.transport.MockTransportService in project OpenSearch by opensearch-project.

From the class VotingConfigurationIT, method testElectsNodeNotInVotingConfiguration.

public void testElectsNodeNotInVotingConfiguration() throws Exception {
    internalCluster().setBootstrapMasterNodeIndex(0);
    final List<String> nodeNames = internalCluster().startNodes(4);
    // a 4-node cluster settles on a 3-node configuration; we then prevent the nodes in the configuration from winning an election
    // by failing at the pre-voting stage, so that the extra node must be elected instead when the master shuts down. This extra node
    // should then add itself into the voting configuration.
    assertFalse(internalCluster().client().admin().cluster().prepareHealth().setWaitForNodes("4").setWaitForEvents(Priority.LANGUID).get().isTimedOut());
    String excludedNodeName = null;
    final ClusterState clusterState = internalCluster().client().admin().cluster().prepareState().clear().setNodes(true).setMetadata(true).get().getState();
    final Set<String> votingConfiguration = clusterState.getLastCommittedConfiguration().getNodeIds();
    assertThat(votingConfiguration, hasSize(3));
    assertThat(clusterState.nodes().getSize(), equalTo(4));
    assertThat(votingConfiguration, hasItem(clusterState.nodes().getMasterNodeId()));
    for (DiscoveryNode discoveryNode : clusterState.nodes()) {
        if (votingConfiguration.contains(discoveryNode.getId()) == false) {
            assertThat(excludedNodeName, nullValue());
            excludedNodeName = discoveryNode.getName();
        }
    }
    for (final String sender : nodeNames) {
        if (sender.equals(excludedNodeName)) {
            continue;
        }
        final MockTransportService senderTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, sender);
        for (final String receiver : nodeNames) {
            senderTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, receiver), (connection, requestId, action, request, options) -> {
                if (action.equals(PreVoteCollector.REQUEST_PRE_VOTE_ACTION_NAME)) {
                    throw new OpenSearchException("rejected");
                }
                connection.sendRequest(requestId, action, request, options);
            });
        }
    }
    internalCluster().stopCurrentMasterNode();
    assertFalse(internalCluster().client().admin().cluster().prepareHealth().setWaitForNodes("3").setWaitForEvents(Priority.LANGUID).get().isTimedOut());
    final ClusterState newClusterState = internalCluster().client().admin().cluster().prepareState().clear().setNodes(true).setMetadata(true).get().getState();
    assertThat(newClusterState.nodes().getMasterNode().getName(), equalTo(excludedNodeName));
    assertThat(newClusterState.getLastCommittedConfiguration().getNodeIds(), hasItem(newClusterState.nodes().getMasterNodeId()));
}
Also used: ClusterState (org.opensearch.cluster.ClusterState), DiscoveryNode (org.opensearch.cluster.node.DiscoveryNode), MockTransportService (org.opensearch.test.transport.MockTransportService), TransportService (org.opensearch.transport.TransportService), OpenSearchException (org.opensearch.OpenSearchException)
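A note on the pattern above: the two-argument addSendBehavior(TransportService, SendRequestBehavior) overload used in this test scopes the behavior to requests sent to one particular receiver, whereas the single-argument overload (used in the later examples) applies to every outbound request of the mock service. A condensed sketch of the per-receiver variant, with placeholder node names rather than anything taken from the test:

MockTransportService senderService =
    (MockTransportService) internalCluster().getInstance(TransportService.class, "sender-node");
TransportService receiverService = internalCluster().getInstance(TransportService.class, "receiver-node");

// Fail only pre-vote requests from sender-node to receiver-node; forward everything else,
// otherwise the intercepted requests would never reach the receiver at all.
senderService.addSendBehavior(receiverService, (connection, requestId, action, request, options) -> {
    if (action.equals(PreVoteCollector.REQUEST_PRE_VOTE_ACTION_NAME)) {
        throw new OpenSearchException("rejected");
    }
    connection.sendRequest(requestId, action, request, options);
});

// Remove every registered behavior once the disrupted scenario has been verified.
senderService.clearAllRules();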

Example 2 with MockTransportService

Use of org.opensearch.test.transport.MockTransportService in project OpenSearch by opensearch-project.

From the class IndexingPressureIT, method testWriteBytesAreIncremented.

public void testWriteBytesAreIncremented() throws Exception {
    assertAcked(prepareCreate(INDEX_NAME, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)));
    ensureGreen(INDEX_NAME);
    Tuple<String, String> primaryReplicaNodeNames = getPrimaryReplicaNodeNames();
    String primaryName = primaryReplicaNodeNames.v1();
    String replicaName = primaryReplicaNodeNames.v2();
    String coordinatingOnlyNode = getCoordinatingOnlyNode();
    final CountDownLatch replicationSendPointReached = new CountDownLatch(1);
    final CountDownLatch latchBlockingReplicationSend = new CountDownLatch(1);
    TransportService primaryService = internalCluster().getInstance(TransportService.class, primaryName);
    final MockTransportService primaryTransportService = (MockTransportService) primaryService;
    TransportService replicaService = internalCluster().getInstance(TransportService.class, replicaName);
    final MockTransportService replicaTransportService = (MockTransportService) replicaService;
    primaryTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
        if (action.equals(TransportShardBulkAction.ACTION_NAME + "[r]")) {
            try {
                replicationSendPointReached.countDown();
                latchBlockingReplicationSend.await();
            } catch (InterruptedException e) {
                throw new IllegalStateException(e);
            }
        }
        connection.sendRequest(requestId, action, request, options);
    });
    final ThreadPool replicaThreadPool = replicaTransportService.getThreadPool();
    final Releasable replicaRelease = blockReplicas(replicaThreadPool);
    final BulkRequest bulkRequest = new BulkRequest();
    int totalRequestSize = 0;
    for (int i = 0; i < 80; ++i) {
        IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()).source(Collections.singletonMap("key", randomAlphaOfLength(50)));
        totalRequestSize += request.ramBytesUsed();
        assertTrue(request.ramBytesUsed() > request.source().length());
        bulkRequest.add(request);
    }
    final long bulkRequestSize = bulkRequest.ramBytesUsed();
    final long bulkShardRequestSize = totalRequestSize;
    try {
        final ActionFuture<BulkResponse> successFuture = client(coordinatingOnlyNode).bulk(bulkRequest);
        replicationSendPointReached.await();
        IndexingPressure primaryWriteLimits = internalCluster().getInstance(IndexingPressureService.class, primaryName).getShardIndexingPressure();
    IndexingPressure replicaWriteLimits = internalCluster().getInstance(IndexingPressureService.class, replicaName).getShardIndexingPressure();
    IndexingPressure coordinatingWriteLimits = internalCluster().getInstance(IndexingPressureService.class, coordinatingOnlyNode).getShardIndexingPressure();
        assertThat(primaryWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes(), greaterThan(bulkShardRequestSize));
        assertThat(primaryWriteLimits.getCurrentPrimaryBytes(), greaterThan(bulkShardRequestSize));
        assertEquals(0, primaryWriteLimits.getCurrentCoordinatingBytes());
        assertEquals(0, primaryWriteLimits.getCurrentReplicaBytes());
        assertEquals(0, replicaWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(0, replicaWriteLimits.getCurrentCoordinatingBytes());
        assertEquals(0, replicaWriteLimits.getCurrentPrimaryBytes());
        assertEquals(0, replicaWriteLimits.getCurrentReplicaBytes());
        assertEquals(bulkRequestSize, coordinatingWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(bulkRequestSize, coordinatingWriteLimits.getCurrentCoordinatingBytes());
        assertEquals(0, coordinatingWriteLimits.getCurrentPrimaryBytes());
        assertEquals(0, coordinatingWriteLimits.getCurrentReplicaBytes());
        latchBlockingReplicationSend.countDown();
        IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()).source(Collections.singletonMap("key", randomAlphaOfLength(50)));
        final BulkRequest secondBulkRequest = new BulkRequest();
        secondBulkRequest.add(request);
        // Use the primary or the replica data node as the coordinating node this time
        boolean usePrimaryAsCoordinatingNode = randomBoolean();
        final ActionFuture<BulkResponse> secondFuture;
        if (usePrimaryAsCoordinatingNode) {
            secondFuture = client(primaryName).bulk(secondBulkRequest);
        } else {
            secondFuture = client(replicaName).bulk(secondBulkRequest);
        }
        final long secondBulkRequestSize = secondBulkRequest.ramBytesUsed();
        final long secondBulkShardRequestSize = request.ramBytesUsed();
        if (usePrimaryAsCoordinatingNode) {
            assertBusy(() -> {
                assertThat(primaryWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes(), greaterThan(bulkShardRequestSize + secondBulkRequestSize));
                assertEquals(secondBulkRequestSize, primaryWriteLimits.getCurrentCoordinatingBytes());
                assertThat(primaryWriteLimits.getCurrentPrimaryBytes(), greaterThan(bulkShardRequestSize + secondBulkRequestSize));
                assertEquals(0, replicaWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
                assertEquals(0, replicaWriteLimits.getCurrentCoordinatingBytes());
                assertEquals(0, replicaWriteLimits.getCurrentPrimaryBytes());
            });
        } else {
            assertThat(primaryWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes(), greaterThan(bulkShardRequestSize));
            assertEquals(secondBulkRequestSize, replicaWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
            assertEquals(secondBulkRequestSize, replicaWriteLimits.getCurrentCoordinatingBytes());
            assertEquals(0, replicaWriteLimits.getCurrentPrimaryBytes());
        }
        assertEquals(bulkRequestSize, coordinatingWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertBusy(() -> assertThat(replicaWriteLimits.getCurrentReplicaBytes(), greaterThan(bulkShardRequestSize + secondBulkShardRequestSize)));
        replicaRelease.close();
        successFuture.actionGet();
        secondFuture.actionGet();
        assertEquals(0, primaryWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(0, primaryWriteLimits.getCurrentCoordinatingBytes());
        assertEquals(0, primaryWriteLimits.getCurrentPrimaryBytes());
        assertEquals(0, primaryWriteLimits.getCurrentReplicaBytes());
        assertEquals(0, replicaWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(0, replicaWriteLimits.getCurrentCoordinatingBytes());
        assertEquals(0, replicaWriteLimits.getCurrentPrimaryBytes());
        assertEquals(0, replicaWriteLimits.getCurrentReplicaBytes());
        assertEquals(0, coordinatingWriteLimits.getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(0, coordinatingWriteLimits.getCurrentCoordinatingBytes());
        assertEquals(0, coordinatingWriteLimits.getCurrentPrimaryBytes());
        assertEquals(0, coordinatingWriteLimits.getCurrentReplicaBytes());
    } finally {
        if (replicationSendPointReached.getCount() > 0) {
            replicationSendPointReached.countDown();
        }
        replicaRelease.close();
        if (latchBlockingReplicationSend.getCount() > 0) {
            latchBlockingReplicationSend.countDown();
        }
        replicaRelease.close();
        primaryTransportService.clearAllRules();
    }
}
Also used: MockTransportService (org.opensearch.test.transport.MockTransportService), ThreadPool (org.opensearch.threadpool.ThreadPool), BulkResponse (org.opensearch.action.bulk.BulkResponse), CountDownLatch (java.util.concurrent.CountDownLatch), IndexRequest (org.opensearch.action.index.IndexRequest), TransportService (org.opensearch.transport.TransportService), BulkRequest (org.opensearch.action.bulk.BulkRequest), Releasable (org.opensearch.common.lease.Releasable)
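blockReplicas is a helper of the test class that is not part of this excerpt. The idea is to occupy every thread of the replica's WRITE pool so replica operations queue up while the assertions run; the Releasable it returns unblocks those threads. A minimal sketch of such a helper, written as an assumption about its shape rather than the exact implementation:

private Releasable blockReplicas(ThreadPool threadPool) {
    final CountDownLatch blockReplication = new CountDownLatch(1);
    final int threads = threadPool.info(ThreadPool.Names.WRITE).getMax();
    final CountDownLatch pointReached = new CountDownLatch(threads);
    // Occupy every WRITE thread with a task that waits on the latch.
    for (int i = 0; i < threads; ++i) {
        threadPool.executor(ThreadPool.Names.WRITE).execute(() -> {
            try {
                pointReached.countDown();
                blockReplication.await();
            } catch (InterruptedException e) {
                throw new IllegalStateException(e);
            }
        });
    }
    // Closing the Releasable frees the blocked threads; the latch only counts down once.
    return () -> {
        if (blockReplication.getCount() > 0) {
            blockReplication.countDown();
        }
    };
}

Under this reading, the repeated replicaRelease.close() calls in the finally block above are harmless, since closing the Releasable a second time is a no-op.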

Example 3 with MockTransportService

Use of org.opensearch.test.transport.MockTransportService in project OpenSearch by opensearch-project.

From the class ShardIndexingPressureIT, method testShardIndexingPressureTrackingDuringBulkWrites.

public void testShardIndexingPressureTrackingDuringBulkWrites() throws Exception {
    assertAcked(prepareCreate(INDEX_NAME, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)));
    ensureGreen(INDEX_NAME);
    Tuple<String, String> primaryReplicaNodeNames = getPrimaryReplicaNodeNames(INDEX_NAME);
    String primaryName = primaryReplicaNodeNames.v1();
    String replicaName = primaryReplicaNodeNames.v2();
    String coordinatingOnlyNode = getCoordinatingOnlyNode();
    final CountDownLatch replicationSendPointReached = new CountDownLatch(1);
    final CountDownLatch latchBlockingReplicationSend = new CountDownLatch(1);
    TransportService primaryService = internalCluster().getInstance(TransportService.class, primaryName);
    final MockTransportService primaryTransportService = (MockTransportService) primaryService;
    TransportService replicaService = internalCluster().getInstance(TransportService.class, replicaName);
    final MockTransportService replicaTransportService = (MockTransportService) replicaService;
    primaryTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
        if (action.equals(TransportShardBulkAction.ACTION_NAME + "[r]")) {
            try {
                replicationSendPointReached.countDown();
                latchBlockingReplicationSend.await();
            } catch (InterruptedException e) {
                throw new IllegalStateException(e);
            }
        }
        connection.sendRequest(requestId, action, request, options);
    });
    final ThreadPool replicaThreadPool = replicaTransportService.getThreadPool();
    final Releasable replicaRelease = blockReplicas(replicaThreadPool);
    final BulkRequest bulkRequest = new BulkRequest();
    int totalRequestSize = 0;
    for (int i = 0; i < 80; ++i) {
        IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()).source(Collections.singletonMap("key", randomAlphaOfLength(50)));
        totalRequestSize += request.ramBytesUsed();
        assertTrue(request.ramBytesUsed() > request.source().length());
        bulkRequest.add(request);
    }
    final long bulkShardRequestSize = totalRequestSize + (RamUsageEstimator.shallowSizeOfInstance(BulkItemRequest.class) * 80) + RamUsageEstimator.shallowSizeOfInstance(BulkShardRequest.class);
    try {
        final ActionFuture<BulkResponse> successFuture = client(coordinatingOnlyNode).bulk(bulkRequest);
        replicationSendPointReached.await();
        IndexService indexService = internalCluster().getInstance(IndicesService.class, primaryName).iterator().next();
        Index index = indexService.getIndexSettings().getIndex();
        ShardId shardId = new ShardId(index, 0);
        ShardIndexingPressureTracker primaryShardTracker = internalCluster().getInstance(IndexingPressureService.class, primaryName).getShardIndexingPressure().getShardIndexingPressureTracker(shardId);
        ShardIndexingPressureTracker replicaShardTracker = internalCluster().getInstance(IndexingPressureService.class, replicaName).getShardIndexingPressure().getShardIndexingPressureTracker(shardId);
        ShardIndexingPressureTracker coordinatingShardTracker = internalCluster().getInstance(IndexingPressureService.class, coordinatingOnlyNode).getShardIndexingPressure().getShardIndexingPressureTracker(shardId);
        assertThat(primaryShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes(), equalTo(bulkShardRequestSize));
        assertThat(primaryShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes(), equalTo(bulkShardRequestSize));
        assertEquals(0, primaryShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, primaryShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, replicaShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(0, replicaShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, replicaShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, replicaShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(bulkShardRequestSize, coordinatingShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(bulkShardRequestSize, coordinatingShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, coordinatingShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, coordinatingShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
        latchBlockingReplicationSend.countDown();
        IndexRequest request = new IndexRequest(INDEX_NAME).id(UUIDs.base64UUID()).source(Collections.singletonMap("key", randomAlphaOfLength(50)));
        final BulkRequest secondBulkRequest = new BulkRequest();
        secondBulkRequest.add(request);
        // Use the primary or the replica data node as the coordinating node this time
        boolean usePrimaryAsCoordinatingNode = randomBoolean();
        final ActionFuture<BulkResponse> secondFuture;
        if (usePrimaryAsCoordinatingNode) {
            secondFuture = client(primaryName).bulk(secondBulkRequest);
        } else {
            secondFuture = client(replicaName).bulk(secondBulkRequest);
        }
        final long secondBulkShardRequestSize = request.ramBytesUsed() + RamUsageEstimator.shallowSizeOfInstance(BulkItemRequest.class) + RamUsageEstimator.shallowSizeOfInstance(BulkShardRequest.class);
        if (usePrimaryAsCoordinatingNode) {
            assertBusy(() -> {
                assertThat(primaryShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes(), equalTo(bulkShardRequestSize + secondBulkShardRequestSize));
                assertEquals(secondBulkShardRequestSize, primaryShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
                assertThat(primaryShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes(), equalTo(bulkShardRequestSize + secondBulkShardRequestSize));
                assertEquals(0, replicaShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
                assertEquals(0, replicaShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
                assertEquals(0, replicaShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
            });
        } else {
            assertThat(primaryShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes(), equalTo(bulkShardRequestSize));
            assertEquals(secondBulkShardRequestSize, replicaShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
            assertEquals(secondBulkShardRequestSize, replicaShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
            assertEquals(0, replicaShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
        }
        assertEquals(bulkShardRequestSize, coordinatingShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertBusy(() -> assertThat(replicaShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes(), equalTo(bulkShardRequestSize + secondBulkShardRequestSize)));
        replicaRelease.close();
        successFuture.actionGet();
        secondFuture.actionGet();
        assertEquals(0, primaryShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(0, primaryShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, primaryShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, primaryShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, replicaShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(0, replicaShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, replicaShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, replicaShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, coordinatingShardTracker.getCommonOperationTracker().getCurrentCombinedCoordinatingAndPrimaryBytes());
        assertEquals(0, coordinatingShardTracker.getCoordinatingOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, coordinatingShardTracker.getPrimaryOperationTracker().getStatsTracker().getCurrentBytes());
        assertEquals(0, coordinatingShardTracker.getReplicaOperationTracker().getStatsTracker().getCurrentBytes());
    } finally {
        if (replicationSendPointReached.getCount() > 0) {
            replicationSendPointReached.countDown();
        }
        replicaRelease.close();
        if (latchBlockingReplicationSend.getCount() > 0) {
            latchBlockingReplicationSend.countDown();
        }
        replicaRelease.close();
        primaryTransportService.clearAllRules();
    }
}
Also used: BulkShardRequest (org.opensearch.action.bulk.BulkShardRequest), MockTransportService (org.opensearch.test.transport.MockTransportService), ThreadPool (org.opensearch.threadpool.ThreadPool), BulkResponse (org.opensearch.action.bulk.BulkResponse), CountDownLatch (java.util.concurrent.CountDownLatch), IndexRequest (org.opensearch.action.index.IndexRequest), ShardId (org.opensearch.index.shard.ShardId), TransportService (org.opensearch.transport.TransportService), BulkRequest (org.opensearch.action.bulk.BulkRequest), Releasable (org.opensearch.common.lease.Releasable)
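Both this test and Example 2 rely on two helpers that the excerpts do not show: getPrimaryReplicaNodeNames resolves which data nodes host the primary and the replica of the single shard, and getCoordinatingOnlyNode provides a node that holds no shard, so its indexing pressure is purely coordinating. A rough sketch of what such helpers can look like; the exact implementations in the test classes may differ:

// Hypothetical helper: node names hosting the primary and the first replica of shard 0.
private Tuple<String, String> getPrimaryReplicaNodeNames(String index) {
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    IndexShardRoutingTable shardTable = state.routingTable().index(index).shard(0);
    String primaryNodeId = shardTable.primaryShard().currentNodeId();
    String replicaNodeId = shardTable.replicaShards().get(0).currentNodeId();
    return Tuple.tuple(state.nodes().get(primaryNodeId).getName(), state.nodes().get(replicaNodeId).getName());
}

// Hypothetical helper: start a node that only coordinates requests and hosts no data.
private String getCoordinatingOnlyNode() {
    return internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
}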

Example 4 with MockTransportService

Use of org.opensearch.test.transport.MockTransportService in project OpenSearch by opensearch-project.

From the class RetentionLeaseIT, method testRetentionLeasesSyncOnRecovery.

public void testRetentionLeasesSyncOnRecovery() throws Exception {
    final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2);
    internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas);
    /*
     * We effectively disable the background sync to ensure that the retention leases are not synced in the background so that the only
     * source of retention leases on the replicas would be from recovery.
     */
    final Settings.Builder settings = Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueHours(24));
    // when we increase the number of replicas below we want to exclude the replicas from being allocated so that they do not recover
    assertAcked(prepareCreate("index", 1, settings));
    ensureYellow("index");
    final AcknowledgedResponse response = client().admin().indices().prepareUpdateSettings("index").setSettings(Settings.builder().put("index.number_of_replicas", numberOfReplicas).build()).get();
    assertTrue(response.isAcknowledged());
    final String primaryShardNodeId = clusterService().state().routingTable().index("index").shard(0).primaryShard().currentNodeId();
    final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName();
    final IndexShard primary = internalCluster().getInstance(IndicesService.class, primaryShardNodeName).getShardOrNull(new ShardId(resolveIndex("index"), 0));
    final int length = randomIntBetween(1, 8);
    final Map<String, RetentionLease> currentRetentionLeases = new LinkedHashMap<>();
    logger.info("adding retention [{}}] leases", length);
    for (int i = 0; i < length; i++) {
        final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8));
        final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE);
        final String source = randomAlphaOfLength(8);
        final CountDownLatch latch = new CountDownLatch(1);
        final ActionListener<ReplicationResponse> listener = countDownLatchListener(latch);
        currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener));
        latch.await();
    }
    logger.info("finished adding [{}] retention leases", length);
    // cause some recoveries to fail to ensure that retention leases are handled properly when retrying a recovery
    assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), TimeValue.timeValueMillis(100))));
    final Semaphore recoveriesToDisrupt = new Semaphore(scaledRandomIntBetween(0, 4));
    final MockTransportService primaryTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, primaryShardNodeName);
    primaryTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
        if (action.equals(PeerRecoveryTargetService.Actions.FINALIZE) && recoveriesToDisrupt.tryAcquire()) {
            if (randomBoolean()) {
                // return a ConnectTransportException to the START_RECOVERY action
                final TransportService replicaTransportService = internalCluster().getInstance(TransportService.class, connection.getNode().getName());
                final DiscoveryNode primaryNode = primaryTransportService.getLocalNode();
                replicaTransportService.disconnectFromNode(primaryNode);
                replicaTransportService.connectToNode(primaryNode);
            } else {
                // return an exception to the FINALIZE action
                throw new OpenSearchException("failing recovery for test purposes");
            }
        }
        connection.sendRequest(requestId, action, request, options);
    });
    logger.info("allow [{}] replicas to allocate", numberOfReplicas);
    // now allow the replicas to be allocated and wait for recovery to finalize
    allowNodes("index", 1 + numberOfReplicas);
    ensureGreen("index");
    // check current retention leases have been synced to all replicas
    for (final ShardRouting replicaShard : clusterService().state().routingTable().index("index").shard(0).replicaShards()) {
        final String replicaShardNodeId = replicaShard.currentNodeId();
        final String replicaShardNodeName = clusterService().state().nodes().get(replicaShardNodeId).getName();
        final IndexShard replica = internalCluster().getInstance(IndicesService.class, replicaShardNodeName).getShardOrNull(new ShardId(resolveIndex("index"), 0));
        final Map<String, RetentionLease> retentionLeasesOnReplica = RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(replica.getRetentionLeases());
        assertThat(retentionLeasesOnReplica, equalTo(currentRetentionLeases));
        // check retention leases have been written on the replica; see RecoveryTarget#finalizeRecovery
        assertThat(currentRetentionLeases, equalTo(RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(replica.loadRetentionLeases())));
    }
}
Also used: DiscoveryNode (org.opensearch.cluster.node.DiscoveryNode), MockTransportService (org.opensearch.test.transport.MockTransportService), IndexShard (org.opensearch.index.shard.IndexShard), AcknowledgedResponse (org.opensearch.action.support.master.AcknowledgedResponse), IndicesService (org.opensearch.indices.IndicesService), Semaphore (java.util.concurrent.Semaphore), CountDownLatch (java.util.concurrent.CountDownLatch), LinkedHashMap (java.util.LinkedHashMap), ReplicationResponse (org.opensearch.action.support.replication.ReplicationResponse), ShardId (org.opensearch.index.shard.ShardId), TransportService (org.opensearch.transport.TransportService), OpenSearchException (org.opensearch.OpenSearchException), ShardRouting (org.opensearch.cluster.routing.ShardRouting), Settings (org.opensearch.common.settings.Settings), IndexSettings (org.opensearch.index.IndexSettings)
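countDownLatchListener is another small helper the excerpt does not show: an ActionListener that releases the latch once the retention lease has been synced, so the loop adds leases one at a time. A plausible sketch (the failure handling here is an assumption):

private static ActionListener<ReplicationResponse> countDownLatchListener(CountDownLatch latch) {
    // Count down on success; fail the test if adding the retention lease reports an error.
    return ActionListener.wrap(response -> latch.countDown(), e -> fail(e.toString()));
}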

Example 5 with MockTransportService

Use of org.opensearch.test.transport.MockTransportService in project OpenSearch by opensearch-project.

From the class DiscoveryDisruptionIT, method testNodeNotReachableFromMaster.

/**
 * Adds an asymmetric break between a master and one of the nodes and makes
 * sure that the node is removed from the cluster, that the node starts pinging, and that
 * the cluster reforms when healed.
 */
public void testNodeNotReachableFromMaster() throws Exception {
    startCluster(3);
    String masterNode = internalCluster().getMasterName();
    String nonMasterNode = null;
    while (nonMasterNode == null) {
        nonMasterNode = randomFrom(internalCluster().getNodeNames());
        if (nonMasterNode.equals(masterNode)) {
            nonMasterNode = null;
        }
    }
    logger.info("blocking request from master [{}] to [{}]", masterNode, nonMasterNode);
    MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, masterNode);
    if (randomBoolean()) {
        masterTransportService.addUnresponsiveRule(internalCluster().getInstance(TransportService.class, nonMasterNode));
    } else {
        masterTransportService.addFailToSendNoConnectRule(internalCluster().getInstance(TransportService.class, nonMasterNode));
    }
    logger.info("waiting for [{}] to be removed from cluster", nonMasterNode);
    ensureStableCluster(2, masterNode);
    logger.info("waiting for [{}] to have no master", nonMasterNode);
    assertNoMaster(nonMasterNode);
    logger.info("healing partition and checking cluster reforms");
    masterTransportService.clearAllRules();
    ensureStableCluster(3);
}
Also used: MockTransportService (org.opensearch.test.transport.MockTransportService), TransportService (org.opensearch.transport.TransportService)
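The two disruption rules picked at random above behave differently: addUnresponsiveRule silently swallows requests to the target so they appear to hang, while addFailToSendNoConnectRule fails each send immediately with a connect exception; clearAllRules heals either kind. A condensed sketch of that flow, with placeholder node names:

MockTransportService masterService =
    (MockTransportService) internalCluster().getInstance(TransportService.class, "master-node");
TransportService isolatedService = internalCluster().getInstance(TransportService.class, "isolated-node");

// Option 1: drop requests on the floor, simulating an unresponsive peer.
masterService.addUnresponsiveRule(isolatedService);
// Option 2: fail each send right away instead of letting it time out.
// masterService.addFailToSendNoConnectRule(isolatedService);

// Later, heal the one-way partition so the cluster can reform.
masterService.clearAllRules();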

Aggregations

MockTransportService (org.opensearch.test.transport.MockTransportService): 109
DiscoveryNode (org.opensearch.cluster.node.DiscoveryNode): 65
Settings (org.opensearch.common.settings.Settings): 60
CountDownLatch (java.util.concurrent.CountDownLatch): 49
TransportService (org.opensearch.transport.TransportService): 41
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 36
ClusterSettings (org.opensearch.common.settings.ClusterSettings): 27
AbstractScopedSettings (org.opensearch.common.settings.AbstractScopedSettings): 23
IOException (java.io.IOException): 22
ArrayList (java.util.ArrayList): 21
List (java.util.List): 19
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 18
AtomicReference (java.util.concurrent.atomic.AtomicReference): 17
ClusterState (org.opensearch.cluster.ClusterState): 17
TransportAddress (org.opensearch.common.transport.TransportAddress): 16
HashSet (java.util.HashSet): 15
Matchers.containsString (org.hamcrest.Matchers.containsString): 15
Version (org.opensearch.Version): 15
StubbableTransport (org.opensearch.test.transport.StubbableTransport): 15
OpenSearchException (org.opensearch.OpenSearchException): 14