Use of org.opensearch.cluster.InternalClusterInfoService in project OpenSearch by opensearch-project.
From the class DiskThresholdDeciderIT, method refreshDiskUsage:
private void refreshDiskUsage() {
    final ClusterInfoService clusterInfoService = internalCluster().getCurrentMasterNodeInstance(ClusterInfoService.class);
    ((InternalClusterInfoService) clusterInfoService).refresh();
    // if the nodes were all under the low watermark already (but unbalanced) then a change in the disk usage doesn't
    // trigger a reroute even though it's now possible to achieve better balance, so we have to do an explicit reroute.
    // TODO fix this?
    if (StreamSupport.stream(clusterInfoService.getClusterInfo().getNodeMostAvailableDiskUsages().values().spliterator(), false)
        .allMatch(cur -> cur.value.getFreeBytes() > WATERMARK_BYTES)) {
        assertAcked(client().admin().cluster().prepareReroute());
    }
    assertFalse(
        client().admin()
            .cluster()
            .prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForNoRelocatingShards(true)
            .setWaitForNoInitializingShards(true)
            .get()
            .isTimedOut()
    );
}
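The watermark test below relies on an assertBusyWithDiskUsageRefresh helper that pairs this refresh with a busy-wait over the shards assigned to a node. A minimal sketch of such a helper, assuming a getShardRoutings(nodeId, indexName) utility that collects the shards of the index currently on the given node (both the body and that utility are illustrative, not the verbatim source):

// Sketch only: retries the assertion, refreshing disk usage stats before each attempt
// so the allocation deciders always see the test's simulated disk sizes.
private void assertBusyWithDiskUsageRefresh(String nodeId, String indexName, Matcher<? super Set<ShardRouting>> matcher)
    throws Exception {
    assertBusy(() -> {
        refreshDiskUsage();
        final Set<ShardRouting> shardRoutings = getShardRoutings(nodeId, indexName); // assumed helper
        assertThat("mismatching shard routings: " + shardRoutings, shardRoutings, matcher);
    }, 30L, TimeUnit.SECONDS);
}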
Use of org.opensearch.cluster.InternalClusterInfoService in project OpenSearch by opensearch-project.
From the class DiskThresholdDeciderIT, method testHighWatermarkNotExceeded:
public void testHighWatermarkNotExceeded() throws Exception {
    internalCluster().startMasterOnlyNode();
    internalCluster().startDataOnlyNode();
    final String dataNodeName = internalCluster().startDataOnlyNode();
    ensureStableCluster(3);

    final InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) internalCluster()
        .getCurrentMasterNodeInstance(ClusterInfoService.class);
    internalCluster().getCurrentMasterNodeInstance(ClusterService.class).addListener(event -> clusterInfoService.refresh());

    final String dataNode0Id = internalCluster().getInstance(NodeEnvironment.class, dataNodeName).nodeId();
    final Path dataNode0Path = internalCluster().getInstance(Environment.class, dataNodeName).dataFiles()[0];

    final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    createIndex(
        indexName,
        Settings.builder()
            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 6)
            .put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms")
            .build()
    );
    final long minShardSize = createReasonableSizedShards(indexName);

    // reduce disk size of node 0 so that no shards fit below the high watermark, forcing all shards onto the other data node
    // (subtract the translog size since the disk threshold decider ignores this and may therefore move the shard back again)
    fileSystemProvider.getTestFileStore(dataNode0Path).setTotalSpace(minShardSize + WATERMARK_BYTES - 1L);
    assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, empty());

    // increase disk size of node 0 to allow just enough room for one shard, and check that it's rebalanced back
    fileSystemProvider.getTestFileStore(dataNode0Path).setTotalSpace(minShardSize + WATERMARK_BYTES + 1L);
    assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, hasSize(1));
}
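The test compares free space against a WATERMARK_BYTES constant, which implies the test class pins the disk watermarks to small absolute byte values in its node settings. A plausible sketch of that configuration, using the setting constants from org.opensearch.cluster.routing.allocation.DiskThresholdSettings; the concrete values here are assumptions:

// Sketch only: fixed byte-valued watermarks so the test can reason about free space precisely.
private static final long WATERMARK_BYTES = new ByteSizeValue(10, ByteSizeUnit.KB).getBytes();

@Override
protected Settings nodeSettings(int nodeOrdinal) {
    return Settings.builder()
        .put(super.nodeSettings(nodeOrdinal))
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), WATERMARK_BYTES * 2 + "b")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), WATERMARK_BYTES + "b")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "0b")
        .build();
}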
Use of org.opensearch.cluster.InternalClusterInfoService in project OpenSearch by opensearch-project.
From the class IndexShardIT, method testExpectedShardSizeIsPresent:
public void testExpectedShardSizeIsPresent() throws InterruptedException {
    assertAcked(
        client().admin()
            .indices()
            .prepareCreate("test")
            .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0))
    );
    for (int i = 0; i < 50; i++) {
        client().prepareIndex("test").setSource("{}", XContentType.JSON).get();
    }
    ensureGreen("test");
    InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) getInstanceFromNode(ClusterInfoService.class);
    clusterInfoService.refresh();
    ClusterState state = getInstanceFromNode(ClusterService.class).state();
    Long test = clusterInfoService.getClusterInfo()
        .getShardSize(state.getRoutingTable().index("test").getShards().get(0).primaryShard());
    assertNotNull(test);
    assertTrue(test > 0);
}
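Beyond per-shard sizes, the same refreshed ClusterInfo carries per-node disk usage, which the watermark tests above depend on. A small hedged example of reading it, assuming the hppc-backed ImmutableOpenMap API that the refreshDiskUsage snippet's cursor-style iteration suggests this OpenSearch version uses:

// Sketch only: iterate the per-node disk usages gathered by the last refresh().
ClusterInfo info = clusterInfoService.getClusterInfo();
for (ObjectObjectCursor<String, DiskUsage> cursor : info.getNodeLeastAvailableDiskUsages()) {
    assertTrue("node " + cursor.key + " should report a total size", cursor.value.getTotalBytes() > 0);
}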
Use of org.opensearch.cluster.InternalClusterInfoService in project OpenSearch by opensearch-project.
From the class ShrinkIndexIT, method testCreateShrinkIndexFails:
/**
* Tests that we can manually recover from a failed allocation due to shards being moved away etc.
*/
public void testCreateShrinkIndexFails() throws Exception {
    internalCluster().ensureAtLeastNumDataNodes(2);
    prepareCreate("source").setSettings(
        Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7)).put("number_of_replicas", 0)
    ).get();
    for (int i = 0; i < 20; i++) {
        client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
    }
    ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
    assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
    DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
    String spareNode = discoveryNodes[0].getName();
    String mergeNode = discoveryNodes[1].getName();
    // ensure all shards are allocated, otherwise the ensureGreen below might not succeed since we require the merge node;
    // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
    // to the require._name below.
    ensureGreen();
    // relocate all shards to one node such that we can merge it.
    client().admin()
        .indices()
        .prepareUpdateSettings("source")
        .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true))
        .get();
    ensureGreen();
    // now merge source into a single shard index
    client().admin()
        .indices()
        .prepareResizeIndex("source", "target")
        .setWaitForActiveShards(ActiveShardCount.NONE)
        .setSettings(
            Settings.builder()
                .put("index.routing.allocation.exclude._name", mergeNode) // manually exclude the merge node to force a failure
                .put("index.number_of_replicas", 0)
                .put("index.allocation.max_retries", 1)
                .build()
        )
        .get();
    client().admin().cluster().prepareHealth("target").setWaitForEvents(Priority.LANGUID).get();
    // now we move all shards away from the merge node
    client().admin()
        .indices()
        .prepareUpdateSettings("source")
        .setSettings(Settings.builder().put("index.routing.allocation.require._name", spareNode).put("index.blocks.write", true))
        .get();
    ensureGreen("source");
    // remove the forced exclusion again
    client().admin()
        .indices()
        .prepareUpdateSettings("target")
        .setSettings(Settings.builder().putNull("index.routing.allocation.exclude._name"))
        .get();
    // wait until it fails
    assertBusy(() -> {
        ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
        RoutingTable routingTables = clusterStateResponse.getState().routingTable();
        assertTrue(routingTables.index("target").shard(0).getShards().get(0).unassigned());
        assertEquals(
            UnassignedInfo.Reason.ALLOCATION_FAILED,
            routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getReason()
        );
        assertEquals(1, routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getNumFailedAllocations());
    });
    // now relocate them all to the right node
    client().admin()
        .indices()
        .prepareUpdateSettings("source")
        .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode))
        .get();
    ensureGreen("source");
    final InternalClusterInfoService infoService = (InternalClusterInfoService) internalCluster().getInstance(
        ClusterInfoService.class,
        internalCluster().getMasterName()
    );
    infoService.refresh();
    // kick off a retry and wait until it's done!
    ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get();
    long expectedShardSize = clusterRerouteResponse.getState()
        .routingTable()
        .index("target")
        .shard(0)
        .getShards()
        .get(0)
        .getExpectedShardSize();
    // we support the expected shard size in the allocator to sum up over the source index shards
    assertTrue("expected shard size must be set but wasn't: " + expectedShardSize, expectedShardSize > 0);
    ensureGreen();
    assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
}
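When debugging the failure this test provokes, the allocation-explain API shows why the shard stays unassigned once index.allocation.max_retries is exhausted. A hedged snippet of that diagnostic, which could be added inside the assertBusy above (not part of the original test):

// Sketch only: explain why the failed target shard is not being assigned.
ClusterAllocationExplanation explanation = client().admin()
    .cluster()
    .prepareAllocationExplain()
    .setIndex("target")
    .setShard(0)
    .setPrimary(true)
    .get()
    .getExplanation();
assertEquals(UnassignedInfo.Reason.ALLOCATION_FAILED, explanation.getUnassignedInfo().getReason());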
Use of org.opensearch.cluster.InternalClusterInfoService in project OpenSearch by opensearch-project.
From the class DeleteByQueryBasicTests, method testDeleteByQueryOnReadOnlyAllowDeleteIndex:
public void testDeleteByQueryOnReadOnlyAllowDeleteIndex() throws Exception {
    createIndex("test");
    final int docs = randomIntBetween(1, 50);
    List<IndexRequestBuilder> builders = new ArrayList<>();
    for (int i = 0; i < docs; i++) {
        builders.add(client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", 1));
    }
    indexRandom(true, true, true, builders);
    // Because the index-level read_only_allow_delete block can be released automatically by the disk allocation decider,
    // we test both cases: disk allocation decider enabled and disabled.
    boolean diskAllocationDeciderEnabled = randomBoolean();
    try {
        if (diskAllocationDeciderEnabled == false) {
            // Disable the disk allocation decider to ensure the read_only_allow_delete block cannot be released
            setDiskAllocationDeciderEnabled(false);
        }
        // When a read_only_allow_delete block is set on the index, it triggers the retry policy in the delete by query
        // request because the rest status of the block is 429
        enableIndexBlock("test", SETTING_READ_ONLY_ALLOW_DELETE);
        if (diskAllocationDeciderEnabled) {
            InternalTestCluster internalTestCluster = internalCluster();
            InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance(
                ClusterInfoService.class,
                internalTestCluster.getMasterName()
            );
            ThreadPool threadPool = internalTestCluster.getInstance(ThreadPool.class, internalTestCluster.getMasterName());
            // Refresh the cluster info after a random delay to check the disk threshold and release the block on the index
            threadPool.schedule(infoService::refresh, TimeValue.timeValueMillis(randomIntBetween(1, 100)), ThreadPool.Names.MANAGEMENT);
            // The delete by query request will be executed successfully because the block will be released
            assertThat(deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()).refresh(true).get(), matcher().deleted(docs));
        } else {
            // The delete by query request will not be executed successfully because the block cannot be released
            assertThat(
                deleteByQuery().source("test")
                    .filter(QueryBuilders.matchAllQuery())
                    .refresh(true)
                    .setMaxRetries(2)
                    .setRetryBackoffInitialTime(TimeValue.timeValueMillis(50))
                    .get(),
                matcher().deleted(0).failures(docs)
            );
        }
    } finally {
        disableIndexBlock("test", SETTING_READ_ONLY_ALLOW_DELETE);
        if (diskAllocationDeciderEnabled == false) {
            setDiskAllocationDeciderEnabled(true);
        }
    }
    if (diskAllocationDeciderEnabled) {
        assertHitCount(client().prepareSearch("test").setSize(0).get(), 0);
    } else {
        assertHitCount(client().prepareSearch("test").setSize(0).get(), docs);
    }
}
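The setDiskAllocationDeciderEnabled helper used above is not shown in this excerpt. A minimal sketch of what it plausibly does, assuming the standard cluster.routing.allocation.disk.threshold_enabled setting; the real helper may differ:

// Sketch only: toggle the disk threshold decider via a transient cluster setting;
// re-enabling clears the override so the default (enabled) applies again.
private void setDiskAllocationDeciderEnabled(boolean value) {
    final String key = DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey();
    final Settings settings = value
        ? Settings.builder().putNull(key).build()
        : Settings.builder().put(key, false).build();
    assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get());
}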