Use of org.opensearch.cluster.ClusterInfo in project OpenSearch (opensearch-project): class DiskThresholdDeciderTests, method testDiskThresholdWithShardSizes. The test builds a fixed ClusterInfo in which allocating the 10-byte shard would push node1 past the 70% low watermark and node2 is already at 99% used, then verifies that the shard is neither initializing nor started.
public void testDiskThresholdWithShardSizes() {
Settings diskSettings = Settings.builder()
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7)
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "71%")
    .build();
ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder();
// 69% used
usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 31));
// 99% used
usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 1));
ImmutableOpenMap<String, DiskUsage> usages = usagesBuilder.build();
ImmutableOpenMap.Builder<String, Long> shardSizesBuilder = ImmutableOpenMap.builder();
// 10 bytes
shardSizesBuilder.put("[test][0][p]", 10L);
ImmutableOpenMap<String, Long> shardSizes = shardSizesBuilder.build();
final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes);
AllocationDeciders deciders = new AllocationDeciders(
    new HashSet<>(
        Arrays.asList(
            new SameShardAllocationDecider(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
            makeDecider(diskSettings)
        )
    )
);
ClusterInfoService cis = () -> {
logger.info("--> calling fake getClusterInfo");
return clusterInfo;
};
AllocationService strategy = new AllocationService(deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis, EmptySnapshotsInfoService.INSTANCE);
Metadata metadata = Metadata.builder().put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)).build();
RoutingTable routingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build();
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metadata(metadata).routingTable(routingTable).build();
logger.info("--> adding node1");
// node2 is added because DiskThresholdDecider automatically ignores single-node clusters
clusterState = ClusterState.builder(clusterState)
    .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
    .build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("--> start the shards (primaries)");
routingTable = startInitializingShardsAndReroute(strategy, clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logShardStates(clusterState);
// Shard can't be allocated to node1 (or node2) because it would cause too much usage
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
// No shards are started, no nodes have enough disk for allocation
assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(0));
}
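The watermark arithmetic behind the comments above can be made explicit. A minimal sketch, not taken from the test, that derives the "% used" figures from the DiskUsage(total, free) values and compares them against the 70% low watermark configured in diskSettings; the local variable names are illustrative only.
// Illustrative only: how "69% used" / "99% used" follow from DiskUsage(total=100, free) above.
long totalBytes = 100;   // path capacity used in the fixtures
long node1FreeBytes = 31; // node1 -> 69% used
long node2FreeBytes = 1;  // node2 -> 99% used
double node1UsedPercent = 100.0 * (totalBytes - node1FreeBytes) / totalBytes; // 69.0
double node2UsedPercent = 100.0 * (totalBytes - node2FreeBytes) / totalBytes; // 99.0
// Allocating the 10-byte shard would put node1 at 79% used, past the 70% low watermark,
// which is why the assertions above expect no INITIALIZING or STARTED shards.
boolean node1WouldCrossLowWatermark = 100.0 * (totalBytes - node1FreeBytes + 10) / totalBytes > 70.0; // true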
Use of org.opensearch.cluster.ClusterInfo in project OpenSearch (opensearch-project): class DiskThresholdDeciderTests, method testShardRelocationsTakenIntoAccount. The test serves ClusterInfo snapshots through an AtomicReference-backed ClusterInfoService, swapping in overfull usages and reserved space mid-test, and checks that the disk threshold decider accounts for in-flight relocations and reserved space.
public void testShardRelocationsTakenIntoAccount() {
Settings diskSettings = Settings.builder()
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7)
    .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.8)
    .build();
ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder();
// 60% used
usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 40));
// 60% used
usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 40));
// 60% used
usagesBuilder.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 40));
ImmutableOpenMap<String, DiskUsage> usages = usagesBuilder.build();
ImmutableOpenMap.Builder<String, Long> shardSizesBuilder = ImmutableOpenMap.builder();
// 14 bytes
shardSizesBuilder.put("[test][0][p]", 14L);
shardSizesBuilder.put("[test][0][r]", 14L);
// 1 byte
shardSizesBuilder.put("[test2][0][p]", 1L);
shardSizesBuilder.put("[test2][0][r]", 1L);
ImmutableOpenMap<String, Long> shardSizes = shardSizesBuilder.build();
final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes);
DiskThresholdDecider decider = makeDecider(diskSettings);
final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
AllocationDeciders deciders = new AllocationDeciders(
    new HashSet<>(
        Arrays.asList(
            new SameShardAllocationDecider(Settings.EMPTY, clusterSettings),
            new EnableAllocationDecider(Settings.builder().put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none").build(), clusterSettings),
            decider
        )
    )
);
final AtomicReference<ClusterInfo> clusterInfoReference = new AtomicReference<>(clusterInfo);
final ClusterInfoService cis = clusterInfoReference::get;
AllocationService strategy = new AllocationService(deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis, EmptySnapshotsInfoService.INSTANCE);
Metadata metadata = Metadata.builder()
    .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
    .put(IndexMetadata.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
    .build();
RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).addAsNew(metadata.index("test2")).build();
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metadata(metadata).routingTable(initialRoutingTable).build();
logger.info("--> adding two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
clusterState = strategy.reroute(clusterState, "reroute");
logShardStates(clusterState);
// shards should be initializing
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(4));
logger.info("--> start the shards");
clusterState = startInitializingShardsAndReroute(strategy, clusterState);
logShardStates(clusterState);
// Assert that we're able to start the primary and replicas
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
logger.info("--> adding node3");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
{
AllocationCommand moveAllocationCommand = new MoveAllocationCommand("test", 0, "node2", "node3");
AllocationCommands cmds = new AllocationCommands(moveAllocationCommand);
clusterState = strategy.reroute(clusterState, cmds, false, false).getClusterState();
logShardStates(clusterState);
}
final ImmutableOpenMap.Builder<String, DiskUsage> overfullUsagesBuilder = ImmutableOpenMap.builder();
// 60% used
overfullUsagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 40));
// 60% used
overfullUsagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 40));
// 100% used
overfullUsagesBuilder.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 0));
final ImmutableOpenMap<String, DiskUsage> overfullUsages = overfullUsagesBuilder.build();
final ImmutableOpenMap.Builder<String, Long> largerShardSizesBuilder = ImmutableOpenMap.builder();
largerShardSizesBuilder.put("[test][0][p]", 14L);
largerShardSizesBuilder.put("[test][0][r]", 14L);
largerShardSizesBuilder.put("[test2][0][p]", 2L);
largerShardSizesBuilder.put("[test2][0][r]", 2L);
final ImmutableOpenMap<String, Long> largerShardSizes = largerShardSizesBuilder.build();
final ClusterInfo overfullClusterInfo = new DevNullClusterInfo(overfullUsages, overfullUsages, largerShardSizes);
{
AllocationCommand moveAllocationCommand = new MoveAllocationCommand("test2", 0, "node2", "node3");
AllocationCommands cmds = new AllocationCommands(moveAllocationCommand);
final ClusterState clusterStateThatRejectsCommands = clusterState;
assertThat(
    expectThrows(IllegalArgumentException.class, () -> strategy.reroute(clusterStateThatRejectsCommands, cmds, false, false)).getMessage(),
    containsString(
        "the node is above the low watermark cluster setting "
            + "[cluster.routing.allocation.disk.watermark.low=0.7], using more disk space than the maximum "
            + "allowed [70.0%], actual free: [26.0%]"
    )
);
clusterInfoReference.set(overfullClusterInfo);
assertThat(
    expectThrows(IllegalArgumentException.class, () -> strategy.reroute(clusterStateThatRejectsCommands, cmds, false, false)).getMessage(),
    containsString("the node has fewer free bytes remaining than the total size of all incoming shards")
);
clusterInfoReference.set(clusterInfo);
}
{
AllocationCommand moveAllocationCommand = new MoveAllocationCommand("test2", 0, "node2", "node3");
AllocationCommands cmds = new AllocationCommands(moveAllocationCommand);
clusterState = startInitializingShardsAndReroute(strategy, clusterState);
clusterState = strategy.reroute(clusterState, cmds, false, false).getClusterState();
logShardStates(clusterState);
clusterInfoReference.set(overfullClusterInfo);
// ensure reroute doesn't fail even though there is negative free space
strategy.reroute(clusterState, "foo");
}
{
clusterInfoReference.set(overfullClusterInfo);
clusterState = applyStartedShardsUntilNoChange(clusterState, strategy);
final List<ShardRouting> startedShardsWithOverfullDisk = clusterState.getRoutingNodes().shardsWithState(STARTED);
assertThat(startedShardsWithOverfullDisk.size(), equalTo(4));
for (ShardRouting shardRouting : startedShardsWithOverfullDisk) {
// no shards on node3 since it has no free space
assertThat(shardRouting.toString(), shardRouting.currentNodeId(), oneOf("node1", "node2"));
}
// reset free space on node 3 and reserve space on node1
clusterInfoReference.set(
    new DevNullClusterInfo(
        usages,
        usages,
        shardSizes,
        new ImmutableOpenMap.Builder<ClusterInfo.NodeAndPath, ClusterInfo.ReservedSpace>().fPut(
            new ClusterInfo.NodeAndPath("node1", "/dev/null"),
            new ClusterInfo.ReservedSpace.Builder().add(new ShardId("", "", 0), between(51, 200)).build()
        ).build()
    )
);
clusterState = applyStartedShardsUntilNoChange(clusterState, strategy);
final List<ShardRouting> startedShardsWithReservedSpace = clusterState.getRoutingNodes().shardsWithState(STARTED);
assertThat(startedShardsWithReservedSpace.size(), equalTo(4));
for (ShardRouting shardRouting : startedShardsWithReservedSpace) {
// no shards on node1 since all its free space is reserved
assertThat(shardRouting.toString(), shardRouting.currentNodeId(), oneOf("node2", "node3"));
}
}
}
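The AtomicReference-backed ClusterInfoService used above is the pattern to reuse whenever a test needs successive reroutes to observe different disk data. A minimal sketch of just that pattern, assuming two placeholder ClusterInfo instances (initialInfo and updatedInfo are illustrative names, not from the test):
// Illustrative sketch of the swap pattern from testShardRelocationsTakenIntoAccount.
AtomicReference<ClusterInfo> infoRef = new AtomicReference<>(initialInfo);
ClusterInfoService cis = infoRef::get;   // each reroute reads whatever snapshot is current
// ... reroute against the initial snapshot ...
infoRef.set(updatedInfo);                // later reroutes see the new usages and reserved space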
Use of org.opensearch.cluster.ClusterInfo in project OpenSearch (opensearch-project): class RebalanceAfterActiveTests, method testRebalanceOnlyAfterAllShardsAreActive. The test supplies per-shard sizes through an anonymous ClusterInfo and verifies that no relocations happen while replicas are still initializing, and that once all shards are started rebalancing spreads them evenly across the ten nodes with the expected shard sizes attached to the relocating copies.
public void testRebalanceOnlyAfterAllShardsAreActive() {
final long[] sizes = new long[5];
for (int i = 0; i < sizes.length; i++) {
sizes[i] = randomIntBetween(0, Integer.MAX_VALUE);
}
AllocationService strategy = createAllocationService(
    Settings.builder()
        .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
        .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING.getKey(), 10)
        .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
        .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
        .build(),
    () -> new ClusterInfo() {
        @Override
        public Long getShardSize(ShardRouting shardRouting) {
            if (shardRouting.getIndexName().equals("test")) {
                return sizes[shardRouting.getId()];
            }
            return null;
        }
    }
);
logger.info("Building initial routing table");
Metadata metadata = Metadata.builder().put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1)).build();
RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build();
ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metadata(metadata).routingTable(initialRoutingTable).build();
assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(5));
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
assertThat(clusterState.routingTable().index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
assertThat(clusterState.routingTable().index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
}
logger.info("start two nodes and fully start the shards");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
clusterState = strategy.reroute(clusterState, "reroute");
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
logger.info("start all the primary shards, replicas will start initializing");
clusterState = startInitializingShardsAndReroute(strategy, clusterState);
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
assertEquals(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).getExpectedShardSize(), sizes[i]);
}
logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened");
clusterState = ClusterState.builder(clusterState)
    .nodes(
        DiscoveryNodes.builder(clusterState.nodes())
            .add(newNode("node3")).add(newNode("node4")).add(newNode("node5")).add(newNode("node6"))
            .add(newNode("node7")).add(newNode("node8")).add(newNode("node9")).add(newNode("node10"))
    )
    .build();
clusterState = strategy.reroute(clusterState, "reroute");
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2));
assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
assertEquals(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).getExpectedShardSize(), sizes[i]);
}
logger.info("start the replica shards, rebalancing should start");
clusterState = startInitializingShardsAndReroute(strategy, clusterState);
// we only allow one relocation at a time
assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5));
assertThat(clusterState.routingTable().shardsWithState(RELOCATING).size(), equalTo(5));
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
int num = 0;
for (ShardRouting routing : clusterState.routingTable().index("test").shard(i).shards()) {
if (routing.state() == RELOCATING || routing.state() == INITIALIZING) {
assertEquals(routing.getExpectedShardSize(), sizes[i]);
num++;
}
}
assertTrue(num > 0);
}
logger.info("complete relocation, other half of relocation should happen");
clusterState = startInitializingShardsAndReroute(strategy, clusterState);
// we now only relocate 3, since 2 remain where they are!
assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(7));
assertThat(clusterState.routingTable().shardsWithState(RELOCATING).size(), equalTo(3));
for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) {
for (ShardRouting routing : clusterState.routingTable().index("test").shard(i).shards()) {
if (routing.state() == RELOCATING || routing.state() == INITIALIZING) {
assertEquals(routing.getExpectedShardSize(), sizes[i]);
}
}
}
logger.info("complete relocation, that's it!");
clusterState = startInitializingShardsAndReroute(strategy, clusterState);
RoutingNodes routingNodes = clusterState.getRoutingNodes();
assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(10));
// make sure we have an even relocation
for (RoutingNode routingNode : routingNodes) {
assertThat(routingNode.size(), equalTo(1));
}
}
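The anonymous ClusterInfo above, which returns a fixed size for shards of the test index and null (unknown) otherwise, can be factored into a small helper when several tests need it. A hedged sketch, not part of the OpenSearch test code:
// Hypothetical helper (not in the test class): fixed per-shard sizes for one index,
// null (size unknown) for every other shard.
static ClusterInfo fixedShardSizes(String indexName, long[] sizesByShardId) {
    return new ClusterInfo() {
        @Override
        public Long getShardSize(ShardRouting shardRouting) {
            if (shardRouting.getIndexName().equals(indexName)) {
                return sizesByShardId[shardRouting.getId()];
            }
            return null;
        }
    };
}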
Use of org.opensearch.cluster.ClusterInfo in project OpenSearch (opensearch-project): class ClusterAllocationExplainIT, method testWorseBalance. The test raises the balance threshold so high that moving the shard cannot improve the balance, then checks that the allocation explain response reports the shard can remain but cannot rebalance, that the node decision is WORSE_BALANCE, and that the returned ClusterInfo matches the requested disk-info option.
public void testWorseBalance() throws Exception {
logger.info("--> starting a single node");
internalCluster().startNode();
ensureStableCluster(1);
prepareIndex(5, 0);
logger.info("--> setting balancing threshold really high, so it won't be met");
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.routing.allocation.balance.threshold", 1000.0f)).get();
logger.info("--> starting another node, with the rebalance threshold so high, it should not get any shards");
internalCluster().startNode();
ensureStableCluster(2);
boolean includeYesDecisions = randomBoolean();
boolean includeDiskInfo = randomBoolean();
ClusterAllocationExplanation explanation = runExplain(true, includeYesDecisions, includeDiskInfo);
ShardId shardId = explanation.getShard();
boolean isPrimary = explanation.isPrimary();
ShardRoutingState shardRoutingState = explanation.getShardState();
DiscoveryNode currentNode = explanation.getCurrentNode();
UnassignedInfo unassignedInfo = explanation.getUnassignedInfo();
ClusterInfo clusterInfo = explanation.getClusterInfo();
AllocateUnassignedDecision allocateDecision = explanation.getShardAllocationDecision().getAllocateDecision();
MoveDecision moveDecision = explanation.getShardAllocationDecision().getMoveDecision();
// verify shard info
assertEquals("idx", shardId.getIndexName());
assertEquals(0, shardId.getId());
assertTrue(isPrimary);
// verify current node info
assertEquals(ShardRoutingState.STARTED, shardRoutingState);
assertNotNull(currentNode);
// verify unassigned info
assertNull(unassignedInfo);
// verify cluster info
verifyClusterInfo(clusterInfo, includeDiskInfo, 2);
// verify decision object
assertFalse(allocateDecision.isDecisionTaken());
assertTrue(moveDecision.isDecisionTaken());
assertEquals(AllocationDecision.NO, moveDecision.getAllocationDecision());
assertEquals("cannot rebalance as no target node exists that can both allocate this shard and improve the cluster balance", moveDecision.getExplanation());
assertTrue(moveDecision.canRemain());
assertFalse(moveDecision.forceMove());
assertTrue(moveDecision.canRebalanceCluster());
assertNotNull(moveDecision.getCanRemainDecision());
assertNull(moveDecision.getTargetNode());
assertEquals(1, moveDecision.getCurrentNodeRanking());
// verifying cluster rebalance decision object
assertNotNull(moveDecision.getClusterRebalanceDecision());
assertEquals(Decision.Type.YES, moveDecision.getClusterRebalanceDecision().type());
for (Decision d : moveDecision.getClusterRebalanceDecision().getDecisions()) {
assertEquals(Decision.Type.YES, d.type());
assertNotNull(d.getExplanation());
}
// verify node decisions
assertEquals(1, moveDecision.getNodeDecisions().size());
NodeAllocationResult result = moveDecision.getNodeDecisions().get(0);
assertNotNull(result.getNode());
assertEquals(1, result.getWeightRanking());
assertEquals(AllocationDecision.WORSE_BALANCE, result.getNodeDecision());
if (includeYesDecisions) {
assertThat(result.getCanAllocateDecision().getDecisions().size(), greaterThan(0));
} else {
assertEquals(0, result.getCanAllocateDecision().getDecisions().size());
}
for (Decision d : result.getCanAllocateDecision().getDecisions()) {
assertEquals(Decision.Type.YES, d.type());
assertNotNull(d.getExplanation());
}
// verify JSON output
try (XContentParser parser = getParser(explanation)) {
verifyShardInfo(parser, true, includeDiskInfo, ShardRoutingState.STARTED);
parser.nextToken();
assertEquals("can_remain_on_current_node", parser.currentName());
parser.nextToken();
assertEquals(AllocationDecision.YES.toString(), parser.text());
parser.nextToken();
assertEquals("can_rebalance_cluster", parser.currentName());
parser.nextToken();
assertEquals(AllocationDecision.YES.toString(), parser.text());
parser.nextToken();
assertEquals("can_rebalance_to_other_node", parser.currentName());
parser.nextToken();
assertEquals(AllocationDecision.NO.toString(), parser.text());
parser.nextToken();
assertEquals("rebalance_explanation", parser.currentName());
parser.nextToken();
assertEquals("cannot rebalance as no target node exists that can both allocate this shard and improve the cluster balance", parser.text());
verifyNodeDecisions(parser, allNodeDecisions(AllocationDecision.WORSE_BALANCE, true), includeYesDecisions, false);
assertEquals(Token.END_OBJECT, parser.nextToken());
}
}
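Because the test sets cluster.routing.allocation.balance.threshold as a transient setting, a cleanup step can clear it afterwards with the same update-settings API; a hedged sketch (the integration-test framework may already reset transient settings between tests):
// Hedged cleanup sketch: clear the transient balance threshold raised earlier in the test.
client().admin().cluster().prepareUpdateSettings()
    .setTransientSettings(Settings.builder().putNull("cluster.routing.allocation.balance.threshold"))
    .get();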
Use of org.opensearch.cluster.ClusterInfo in project OpenSearch (opensearch-project): class ClusterAllocationExplainIT, method testAssignedReplicaOnSpecificNode. The test explains a started replica on a specific node while one node is excluded by an allocation filter, and verifies the current node, the returned ClusterInfo, and the NO move and rebalance decisions, both in the response object and in its JSON rendering.
public void testAssignedReplicaOnSpecificNode() throws Exception {
logger.info("--> starting 3 nodes");
List<String> nodes = internalCluster().startNodes(3);
String excludedNode = nodes.get(randomIntBetween(0, 2));
prepareIndex(randomIndexState(), 1, 2, Settings.builder().put("index.routing.allocation.exclude._name", excludedNode).build(), ActiveShardCount.from(2));
boolean includeYesDecisions = randomBoolean();
boolean includeDiskInfo = randomBoolean();
ClusterAllocationExplanation explanation = runExplain(false, replicaNode().getId(), includeYesDecisions, includeDiskInfo);
ShardId shardId = explanation.getShard();
boolean isPrimary = explanation.isPrimary();
ShardRoutingState shardRoutingState = explanation.getShardState();
DiscoveryNode currentNode = explanation.getCurrentNode();
UnassignedInfo unassignedInfo = explanation.getUnassignedInfo();
ClusterInfo clusterInfo = explanation.getClusterInfo();
AllocateUnassignedDecision allocateDecision = explanation.getShardAllocationDecision().getAllocateDecision();
MoveDecision moveDecision = explanation.getShardAllocationDecision().getMoveDecision();
// verify shard info
assertEquals("idx", shardId.getIndexName());
assertEquals(0, shardId.getId());
assertFalse(isPrimary);
// verify current node info
assertEquals(ShardRoutingState.STARTED, shardRoutingState);
assertNotNull(currentNode);
assertEquals(replicaNode().getName(), currentNode.getName());
// verify unassigned info
assertNull(unassignedInfo);
// verify cluster info
verifyClusterInfo(clusterInfo, includeDiskInfo, 3);
// verify decision objects
assertFalse(allocateDecision.isDecisionTaken());
assertTrue(moveDecision.isDecisionTaken());
assertEquals(AllocationDecision.NO, moveDecision.getAllocationDecision());
assertEquals("rebalancing is not allowed", moveDecision.getExplanation());
assertTrue(moveDecision.canRemain());
assertFalse(moveDecision.forceMove());
assertFalse(moveDecision.canRebalanceCluster());
assertNotNull(moveDecision.getCanRemainDecision());
assertNull(moveDecision.getTargetNode());
// verifying cluster rebalance decision object
assertNotNull(moveDecision.getClusterRebalanceDecision());
assertEquals(Decision.Type.NO, moveDecision.getClusterRebalanceDecision().type());
// verify node decisions
assertEquals(2, moveDecision.getNodeDecisions().size());
for (NodeAllocationResult result : moveDecision.getNodeDecisions()) {
assertNotNull(result.getNode());
assertEquals(1, result.getWeightRanking());
assertEquals(AllocationDecision.NO, result.getNodeDecision());
if (includeYesDecisions) {
assertThat(result.getCanAllocateDecision().getDecisions().size(), greaterThan(1));
} else {
assertEquals(1, result.getCanAllocateDecision().getDecisions().size());
}
for (Decision d : result.getCanAllocateDecision().getDecisions()) {
if (d.type() == Decision.Type.NO) {
assertThat(d.label(), is(oneOf("filter", "same_shard")));
}
assertNotNull(d.getExplanation());
}
}
// verify JSON output
try (XContentParser parser = getParser(explanation)) {
verifyShardInfo(parser, false, includeDiskInfo, ShardRoutingState.STARTED);
parser.nextToken();
assertEquals("can_remain_on_current_node", parser.currentName());
parser.nextToken();
assertEquals(AllocationDecision.YES.toString(), parser.text());
parser.nextToken();
assertEquals("can_rebalance_cluster", parser.currentName());
parser.nextToken();
assertEquals(AllocationDecision.NO.toString(), parser.text());
parser.nextToken();
assertEquals("can_rebalance_cluster_decisions", parser.currentName());
verifyDeciders(parser, AllocationDecision.NO);
parser.nextToken();
assertEquals("can_rebalance_to_other_node", parser.currentName());
parser.nextToken();
assertEquals(AllocationDecision.NO.toString(), parser.text());
parser.nextToken();
assertEquals("rebalance_explanation", parser.currentName());
parser.nextToken();
assertEquals("rebalancing is not allowed", parser.text());
verifyNodeDecisions(parser, allNodeDecisions(AllocationDecision.NO, false), includeYesDecisions, false);
assertEquals(Token.END_OBJECT, parser.nextToken());
}
}
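runExplain is a private helper of ClusterAllocationExplainIT; its body is not shown here. A hedged sketch of roughly what such a call maps to in the public allocation-explain API (the helper additionally pins the explanation to the replica's node, which this sketch omits):
// Hedged sketch, not the actual helper: an explain request for the replica of [idx][0],
// reusing the includeYesDecisions / includeDiskInfo flags chosen above.
ClusterAllocationExplanation explanation = client().admin()
    .cluster()
    .prepareAllocationExplain()
    .setIndex("idx")
    .setShard(0)
    .setPrimary(false)
    .setIncludeYesDecisions(includeYesDecisions)
    .setIncludeDiskInfo(includeDiskInfo)
    .get()
    .getExplanation();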