Use of org.opensearch.test.InternalTestCluster in project OpenSearch by opensearch-project.
From class InternalTestClusterTests, method testDifferentRolesMaintainPathOnRestart.
public void testDifferentRolesMaintainPathOnRestart() throws Exception {
    final Path baseDir = createTempDir();
    final int numNodes = 5;
    InternalTestCluster cluster = new InternalTestCluster(
        randomLong(),
        baseDir,
        false,
        false,
        0,
        0,
        "test",
        new NodeConfigurationSource() {
            @Override
            public Settings nodeSettings(int nodeOrdinal) {
                return Settings.builder()
                    .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType())
                    .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0)
                    .putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file")
                    .putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey())
                    .build();
            }

            @Override
            public Path nodeConfigPath(int nodeOrdinal) {
                return null;
            }
        },
        0,
        "",
        mockPlugins(),
        Function.identity()
    );
    cluster.beforeTest(random());
    List<DiscoveryNodeRole> roles = new ArrayList<>();
    for (int i = 0; i < numNodes; i++) {
        final DiscoveryNodeRole role = i == numNodes - 1 && roles.contains(DiscoveryNodeRole.MASTER_ROLE) == false
            ? DiscoveryNodeRole.MASTER_ROLE // last node and still no master
            : randomFrom(DiscoveryNodeRole.MASTER_ROLE, DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.INGEST_ROLE);
        roles.add(role);
    }
    cluster.setBootstrapMasterNodeIndex(
        randomIntBetween(0, (int) roles.stream().filter(role -> role.equals(DiscoveryNodeRole.MASTER_ROLE)).count() - 1)
    );
    try {
        Map<DiscoveryNodeRole, Set<String>> pathsPerRole = new HashMap<>();
        for (int i = 0; i < numNodes; i++) {
            final DiscoveryNodeRole role = roles.get(i);
            final String node;
            if (role == DiscoveryNodeRole.MASTER_ROLE) {
                node = cluster.startMasterOnlyNode();
            } else if (role == DiscoveryNodeRole.DATA_ROLE) {
                node = cluster.startDataOnlyNode();
            } else if (role == DiscoveryNodeRole.INGEST_ROLE) {
                node = cluster.startCoordinatingOnlyNode(Settings.EMPTY);
            } else {
                throw new IllegalStateException("get your story straight");
            }
            Set<String> rolePaths = pathsPerRole.computeIfAbsent(role, k -> new HashSet<>());
            for (Path path : getNodePaths(cluster, node)) {
                assertTrue(rolePaths.add(path.toString()));
            }
        }
        cluster.validateClusterFormed();
        cluster.fullRestart();
        Map<DiscoveryNodeRole, Set<String>> result = new HashMap<>();
        for (String name : cluster.getNodeNames()) {
            DiscoveryNode node = cluster.getInstance(ClusterService.class, name).localNode();
            List<String> paths = Arrays.stream(getNodePaths(cluster, name)).map(Path::toString).collect(Collectors.toList());
            if (node.isMasterNode()) {
                result.computeIfAbsent(DiscoveryNodeRole.MASTER_ROLE, k -> new HashSet<>()).addAll(paths);
            } else if (node.isDataNode()) {
                result.computeIfAbsent(DiscoveryNodeRole.DATA_ROLE, k -> new HashSet<>()).addAll(paths);
            } else {
                result.computeIfAbsent(DiscoveryNodeRole.INGEST_ROLE, k -> new HashSet<>()).addAll(paths);
            }
        }
        assertThat(result.size(), equalTo(pathsPerRole.size()));
        for (DiscoveryNodeRole role : result.keySet()) {
            assertThat("paths are not the same for " + role, result.get(role), equalTo(pathsPerRole.get(role)));
        }
    } finally {
        cluster.close();
    }
}
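For orientation, the construction boilerplate shared by these tests can be reduced to the following minimal lifecycle sketch. It reuses only calls shown in the example above (the twelve-argument constructor, beforeTest, setBootstrapMasterNodeIndex, the start*Node helpers, validateClusterFormed, close) and assumes the same OpenSearchTestCase scaffolding (createTempDir, random(), mockPlugins(), getTestTransportType(), the statically imported discovery setting keys) is in scope; it is a sketch, not a complete or recommended configuration.

// Minimal sketch of the InternalTestCluster lifecycle, distilled from the example above.
InternalTestCluster cluster = new InternalTestCluster(
    randomLong(),        // cluster seed
    createTempDir(),     // base directory for node data paths
    false,               // "masterNodes" flag (see testBeforeTest below)
    false,               // do not auto-manage cluster-manager nodes
    0, 0,                // min/max data nodes started automatically
    "test",              // cluster name
    new NodeConfigurationSource() {
        @Override
        public Settings nodeSettings(int nodeOrdinal) {
            return Settings.builder()
                .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType())
                .putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file")
                .putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey())
                .build();
        }

        @Override
        public Path nodeConfigPath(int nodeOrdinal) {
            return null;
        }
    },
    0,                   // client (coordinating-only) nodes
    "",                  // node name prefix
    mockPlugins(),
    Function.identity()
);
try {
    cluster.beforeTest(random());
    cluster.setBootstrapMasterNodeIndex(0); // needed here because cluster-manager nodes are not auto-managed
    cluster.startMasterOnlyNode();
    cluster.startDataOnlyNode();
    cluster.validateClusterFormed();
} finally {
    cluster.close();                        // always release node resources
}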
Use of org.opensearch.test.InternalTestCluster in project OpenSearch by opensearch-project.
From class InternalTestClusterTests, method testBeforeTest.
public void testBeforeTest() throws Exception {
    final boolean autoManageMinMasterNodes = randomBoolean();
    long clusterSeed = randomLong();
    final boolean masterNodes;
    final int minNumDataNodes;
    final int maxNumDataNodes;
    final int bootstrapMasterNodeIndex;
    if (autoManageMinMasterNodes) {
        masterNodes = randomBoolean();
        minNumDataNodes = randomIntBetween(0, 3);
        maxNumDataNodes = randomIntBetween(minNumDataNodes, 4);
        bootstrapMasterNodeIndex = -1;
    } else {
        // when min master nodes are managed manually, we need to lock down the number of nodes
        minNumDataNodes = randomIntBetween(0, 4);
        maxNumDataNodes = minNumDataNodes;
        masterNodes = false;
        bootstrapMasterNodeIndex = maxNumDataNodes == 0 ? -1 : randomIntBetween(0, maxNumDataNodes - 1);
    }
    final int numClientNodes = randomIntBetween(0, 2);
    NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() {
        @Override
        public Settings nodeSettings(int nodeOrdinal) {
            final Settings.Builder settings = Settings.builder()
                .put(DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file")
                .putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey())
                .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType());
            if (autoManageMinMasterNodes == false) {
                assert minNumDataNodes == maxNumDataNodes;
                assert masterNodes == false;
            }
            return settings.build();
        }

        @Override
        public Path nodeConfigPath(int nodeOrdinal) {
            return null;
        }
    };
    String nodePrefix = "foobar";
    InternalTestCluster cluster0 = new InternalTestCluster(
        clusterSeed,
        createTempDir(),
        masterNodes,
        autoManageMinMasterNodes,
        minNumDataNodes,
        maxNumDataNodes,
        "clustername",
        nodeConfigurationSource,
        numClientNodes,
        nodePrefix,
        mockPlugins(),
        Function.identity()
    );
    cluster0.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex);
    InternalTestCluster cluster1 = new InternalTestCluster(
        clusterSeed,
        createTempDir(),
        masterNodes,
        autoManageMinMasterNodes,
        minNumDataNodes,
        maxNumDataNodes,
        "clustername",
        nodeConfigurationSource,
        numClientNodes,
        nodePrefix,
        mockPlugins(),
        Function.identity()
    );
    cluster1.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex);
    assertClusters(cluster0, cluster1, false);
    long seed = randomLong();
    try {
        {
            Random random = new Random(seed);
            cluster0.beforeTest(random);
        }
        {
            Random random = new Random(seed);
            cluster1.beforeTest(random);
        }
        assertArrayEquals(cluster0.getNodeNames(), cluster1.getNodeNames());
        Iterator<Client> iterator1 = cluster1.getClients().iterator();
        for (Client client : cluster0.getClients()) {
            assertTrue(iterator1.hasNext());
            Client other = iterator1.next();
            assertSettings(client.settings(), other.settings(), false);
        }
        cluster0.afterTest();
        cluster1.afterTest();
    } finally {
        IOUtils.close(cluster0, cluster1);
    }
}
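assertClusters and assertSettings are private helpers of InternalTestClusterTests and are not reproduced on this page. As a purely hypothetical illustration of the kind of comparison such a helper can perform (not the actual helper body), a key-by-key settings comparison that skips values expected to differ between two otherwise identical clusters might look like this:

// Hypothetical illustration only; the real assertSettings in InternalTestClusterTests may differ.
// Compares two node-level Settings objects key by key, optionally skipping keys that legitimately
// vary between clusters built from the same seed (for example the per-cluster data paths).
private static void assertSettingsEquivalent(Settings left, Settings right, boolean compareClusterUniqueSettings) {
    Set<String> keys = new HashSet<>(left.keySet());
    keys.addAll(right.keySet());
    for (String key : keys) {
        if (compareClusterUniqueSettings == false && key.startsWith("path.")) {
            continue; // home/data paths point at different temp dirs per cluster
        }
        assertEquals("setting [" + key + "] differs", left.get(key), right.get(key));
    }
}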
Use of org.opensearch.test.InternalTestCluster in project OpenSearch by opensearch-project.
From class SingleNodeDiscoveryIT, method testSingleNodesDoNotDiscoverEachOther.
public void testSingleNodesDoNotDiscoverEachOther() throws IOException, InterruptedException {
    final TransportService service = internalCluster().getInstance(TransportService.class);
    final int port = service.boundAddress().publishAddress().getPort();
    final NodeConfigurationSource configurationSource = new NodeConfigurationSource() {
        @Override
        public Settings nodeSettings(int nodeOrdinal) {
            return Settings.builder()
                .put("discovery.type", "single-node")
                .put("transport.type", getTestTransportType())
                .put("transport.port", port + "-" + (port + 5 - 1))
                .build();
        }

        @Override
        public Path nodeConfigPath(int nodeOrdinal) {
            return null;
        }
    };
    try (
        InternalTestCluster other = new InternalTestCluster(
            randomLong(), createTempDir(), false, false, 1, 1,
            internalCluster().getClusterName(), configurationSource, 0, "other",
            Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class), Function.identity()
        )
    ) {
        other.beforeTest(random());
        final ClusterState first = internalCluster().getInstance(ClusterService.class).state();
        final ClusterState second = other.getInstance(ClusterService.class).state();
        assertThat(first.nodes().getSize(), equalTo(1));
        assertThat(second.nodes().getSize(), equalTo(1));
        assertThat(first.nodes().getMasterNodeId(), not(equalTo(second.nodes().getMasterNodeId())));
        assertThat(first.metadata().clusterUUID(), not(equalTo(second.metadata().clusterUUID())));
    }
}
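The same "second, isolated cluster" setup can be factored into a helper. The sketch below is hypothetical (no such helper exists in the OpenSearch test framework); it only rearranges the constructor call and settings from the example above, and the caller remains responsible for calling beforeTest(random()) and closing the returned cluster, as the try-with-resources block does here.

// Hypothetical helper: builds a throwaway single-node cluster that deliberately reuses the suite's
// cluster name and a nearby transport port range, so a test can assert the two clusters still
// do not discover each other.
private InternalTestCluster newIsolatedSingleNodeCluster(int basePort) {
    NodeConfigurationSource source = new NodeConfigurationSource() {
        @Override
        public Settings nodeSettings(int nodeOrdinal) {
            return Settings.builder()
                .put("discovery.type", "single-node")
                .put("transport.type", getTestTransportType())
                .put("transport.port", basePort + "-" + (basePort + 4)) // same five-port window as above
                .build();
        }

        @Override
        public Path nodeConfigPath(int nodeOrdinal) {
            return null;
        }
    };
    return new InternalTestCluster(
        randomLong(), createTempDir(), false, false, 1, 1,
        internalCluster().getClusterName(), source, 0, "other",
        Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class), Function.identity()
    );
}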
Use of org.opensearch.test.InternalTestCluster in project OpenSearch by opensearch-project.
From class DeleteByQueryBasicTests, method testDeleteByQueryOnReadOnlyAllowDeleteIndex.
public void testDeleteByQueryOnReadOnlyAllowDeleteIndex() throws Exception {
    createIndex("test");
    final int docs = randomIntBetween(1, 50);
    List<IndexRequestBuilder> builders = new ArrayList<>();
    for (int i = 0; i < docs; i++) {
        builders.add(client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", 1));
    }
    indexRandom(true, true, true, builders);
    // The index-level read_only_allow_delete block can be released automatically by the disk allocation decider,
    // so test both cases: disk allocation decider enabled and disabled.
    boolean diskAllocationDeciderEnabled = randomBoolean();
    try {
        if (diskAllocationDeciderEnabled == false) {
            // Disable the disk allocation decider to ensure the read_only_allow_delete block cannot be released
            setDiskAllocationDeciderEnabled(false);
        }
        // When a read_only_allow_delete block is set on the index,
        // it triggers the retry policy in the delete-by-query request because the REST status of the block is 429
        enableIndexBlock("test", SETTING_READ_ONLY_ALLOW_DELETE);
        if (diskAllocationDeciderEnabled) {
            InternalTestCluster internalTestCluster = internalCluster();
            InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance(
                ClusterInfoService.class,
                internalTestCluster.getMasterName()
            );
            ThreadPool threadPool = internalTestCluster.getInstance(ThreadPool.class, internalTestCluster.getMasterName());
            // Refresh the cluster info after a random delay to check the disk threshold and release the block on the index
            threadPool.schedule(infoService::refresh, TimeValue.timeValueMillis(randomIntBetween(1, 100)), ThreadPool.Names.MANAGEMENT);
            // The delete-by-query request will be executed successfully because the block will be released
            assertThat(
                deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()).refresh(true).get(),
                matcher().deleted(docs)
            );
        } else {
            // The delete-by-query request will not be executed successfully because the block cannot be released
            assertThat(
                deleteByQuery().source("test")
                    .filter(QueryBuilders.matchAllQuery())
                    .refresh(true)
                    .setMaxRetries(2)
                    .setRetryBackoffInitialTime(TimeValue.timeValueMillis(50))
                    .get(),
                matcher().deleted(0).failures(docs)
            );
        }
    } finally {
        disableIndexBlock("test", SETTING_READ_ONLY_ALLOW_DELETE);
        if (diskAllocationDeciderEnabled == false) {
            setDiskAllocationDeciderEnabled(true);
        }
    }
    if (diskAllocationDeciderEnabled) {
        assertHitCount(client().prepareSearch("test").setSize(0).get(), 0);
    } else {
        assertHitCount(client().prepareSearch("test").setSize(0).get(), docs);
    }
}
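setDiskAllocationDeciderEnabled(...) is a helper of the test class and is not shown above. A plausible sketch, assuming the standard cluster.routing.allocation.disk.threshold_enabled cluster setting is what toggles the disk allocation decider, is:

// Hypothetical sketch of setDiskAllocationDeciderEnabled(...); assumes the
// "cluster.routing.allocation.disk.threshold_enabled" cluster setting controls the decider.
private void setDiskAllocationDeciderEnabled(boolean value) {
    Settings settings = value
        ? Settings.builder().putNull("cluster.routing.allocation.disk.threshold_enabled").build() // restore default (enabled)
        : Settings.builder().put("cluster.routing.allocation.disk.threshold_enabled", false).build();
    client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get();
}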
Use of org.opensearch.test.InternalTestCluster in project OpenSearch by opensearch-project.
From class ClusterInfoServiceIT, method testClusterInfoServiceInformationClearOnError.
public void testClusterInfoServiceInformationClearOnError() {
    internalCluster().startNodes(
        2,
        // manually control publishing
        Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), "60m").build()
    );
    prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)).get();
    ensureGreen("test");
    InternalTestCluster internalTestCluster = internalCluster();
    InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance(
        ClusterInfoService.class,
        internalTestCluster.getMasterName()
    );
    // get one healthy sample
    ClusterInfo info = infoService.refresh();
    assertNotNull("failed to collect info", info);
    assertThat("some usages are populated", info.getNodeLeastAvailableDiskUsages().size(), Matchers.equalTo(2));
    assertThat("some shard sizes are populated", info.shardSizes.size(), greaterThan(0));
    MockTransportService mockTransportService = (MockTransportService) internalCluster().getInstance(
        TransportService.class,
        internalTestCluster.getMasterName()
    );
    final AtomicBoolean timeout = new AtomicBoolean(false);
    final Set<String> blockedActions = newHashSet(
        NodesStatsAction.NAME,
        NodesStatsAction.NAME + "[n]",
        IndicesStatsAction.NAME,
        IndicesStatsAction.NAME + "[n]"
    );
    // drop all outgoing stats requests to force a timeout
    for (DiscoveryNode node : internalTestCluster.clusterService().state().getNodes()) {
        mockTransportService.addSendBehavior(
            internalTestCluster.getInstance(TransportService.class, node.getName()),
            (connection, requestId, action, request, options) -> {
                if (blockedActions.contains(action)) {
                    if (timeout.get()) {
                        logger.info("dropping [{}] to [{}]", action, node);
                        return;
                    }
                }
                connection.sendRequest(requestId, action, request, options);
            }
        );
    }
    setClusterInfoTimeout("1s");
    // timeouts shouldn't clear the info
    timeout.set(true);
    info = infoService.refresh();
    assertNotNull("info should not be null", info);
    // Node stats time out both at the request level and on the countdown latch, so the service is likely
    // to update node disk usage based only on the one response that came back from the local node.
    assertThat(info.getNodeLeastAvailableDiskUsages().size(), greaterThanOrEqualTo(1));
    assertThat(info.getNodeMostAvailableDiskUsages().size(), greaterThanOrEqualTo(1));
    // indices stats are guaranteed to time out on the latch, not updating anything
    assertThat(info.shardSizes.size(), greaterThan(1));
    // now we cause an exception
    timeout.set(false);
    ActionFilters actionFilters = internalTestCluster.getInstance(ActionFilters.class, internalTestCluster.getMasterName());
    BlockingActionFilter blockingActionFilter = null;
    for (ActionFilter filter : actionFilters.filters()) {
        if (filter instanceof BlockingActionFilter) {
            blockingActionFilter = (BlockingActionFilter) filter;
            break;
        }
    }
    assertNotNull("failed to find BlockingActionFilter", blockingActionFilter);
    blockingActionFilter.blockActions(blockedActions.toArray(Strings.EMPTY_ARRAY));
    info = infoService.refresh();
    assertNotNull("info should not be null", info);
    assertThat(info.getNodeLeastAvailableDiskUsages().size(), equalTo(0));
    assertThat(info.getNodeMostAvailableDiskUsages().size(), equalTo(0));
    assertThat(info.shardSizes.size(), equalTo(0));
    assertThat(info.reservedSpace.size(), equalTo(0));
    // check we recover
    blockingActionFilter.blockActions();
    setClusterInfoTimeout("15s");
    info = infoService.refresh();
    assertNotNull("info should not be null", info);
    assertThat(info.getNodeLeastAvailableDiskUsages().size(), equalTo(2));
    assertThat(info.getNodeMostAvailableDiskUsages().size(), equalTo(2));
    assertThat(info.shardSizes.size(), greaterThan(0));
    RoutingTable routingTable = client().admin().cluster().prepareState().clear().setRoutingTable(true).get().getState().routingTable();
    for (ShardRouting shard : routingTable.allShards()) {
        assertTrue(info.getReservedSpace(shard.currentNodeId(), info.getDataPath(shard)).containsShardId(shard.shardId()));
    }
}
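setClusterInfoTimeout(...) is likewise a small helper of the test class and is not shown above. A plausible sketch, assuming the cluster.info.update.timeout cluster setting bounds how long the InternalClusterInfoService waits for its stats requests, is:

// Hypothetical sketch of setClusterInfoTimeout(...); assumes the "cluster.info.update.timeout"
// cluster setting controls the stats-collection timeout used by InternalClusterInfoService.
private void setClusterInfoTimeout(String timeValue) {
    client().admin()
        .cluster()
        .prepareUpdateSettings()
        .setTransientSettings(Settings.builder().put("cluster.info.update.timeout", timeValue).build())
        .get();
}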