Use of org.elasticsearch.cluster.ClusterInfo in project elasticsearch by elastic.
The class DiskThresholdDeciderUnitTests, method testCanRemainUsesLeastAvailableSpace.
public void testCanRemainUsesLeastAvailableSpace() {
ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss);
ImmutableOpenMap.Builder<ShardRouting, String> shardRoutingMap = ImmutableOpenMap.builder();
DiscoveryNode node_0 = new DiscoveryNode("node_0", buildNewFakeTransportAddress(), Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT);
DiscoveryNode node_1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT);
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)).build();
final IndexMetaData indexMetaData = metaData.index("test");
ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 0), true, StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_0 = ShardRoutingHelper.initialize(test_0, node_0.getId());
test_0 = ShardRoutingHelper.moveToStarted(test_0);
shardRoutingMap.put(test_0, "/node0/least");
ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 1), true, StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_1 = ShardRoutingHelper.initialize(test_1, node_1.getId());
test_1 = ShardRoutingHelper.moveToStarted(test_1);
shardRoutingMap.put(test_1, "/node1/least");
ShardRouting test_2 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 2), true, StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_2 = ShardRoutingHelper.initialize(test_2, node_1.getId());
test_2 = ShardRoutingHelper.moveToStarted(test_2);
shardRoutingMap.put(test_2, "/node1/most");
ShardRouting test_3 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 3), true, StoreRecoverySource.EMPTY_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
test_3 = ShardRoutingHelper.initialize(test_3, node_1.getId());
test_3 = ShardRoutingHelper.moveToStarted(test_3);
// Intentionally not in the shardRoutingMap. We want to test what happens when we don't know where it is.
RoutingTable routingTable = RoutingTable.builder().addAsNew(indexMetaData).build();
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("--> adding two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(node_0).add(node_1)).build();
// actual test -- after all that bloat :)
ImmutableOpenMap.Builder<String, DiskUsage> leastAvailableUsages = ImmutableOpenMap.builder();
// 90% used
leastAvailableUsages.put("node_0", new DiskUsage("node_0", "node_0", "/node0/least", 100, 10));
// 91% used
leastAvailableUsages.put("node_1", new DiskUsage("node_1", "node_1", "/node1/least", 100, 9));
ImmutableOpenMap.Builder<String, DiskUsage> mostAvailableUsage = ImmutableOpenMap.builder();
// 10% used
mostAvailableUsage.put("node_0", new DiskUsage("node_0", "node_0", "/node0/most", 100, 90));
// 10% used
mostAvailableUsage.put("node_1", new DiskUsage("node_1", "node_1", "/node1/most", 100, 90));
ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder();
// 10 bytes
shardSizes.put("[test][0][p]", 10L);
shardSizes.put("[test][1][p]", 10L);
shardSizes.put("[test][2][p]", 10L);
final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), shardSizes.build(), shardRoutingMap.build());
RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.singleton(decider)), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime(), false);
allocation.debugDecision(true);
Decision decision = decider.canRemain(test_0, new RoutingNode("node_0", node_0), allocation);
assertEquals(Decision.Type.YES, decision.type());
assertThat(((Decision.Single) decision).getExplanation(), containsString("there is enough disk on this node for the shard to remain, free: [10b]"));
decision = decider.canRemain(test_1, new RoutingNode("node_1", node_1), allocation);
assertEquals(Decision.Type.NO, decision.type());
assertThat(((Decision.Single) decision).getExplanation(), containsString("the shard cannot remain on this node because it is " + "above the high watermark cluster setting [cluster.routing.allocation.disk.watermark.high=90%] and there is less than " + "the required [10.0%] free disk on node, actual free: [9.0%]"));
try {
    decider.canRemain(test_0, new RoutingNode("node_1", node_1), allocation);
    fail("not allocated on this node");
} catch (IllegalArgumentException ex) {
    // not allocated on that node
}
try {
    decider.canRemain(test_1, new RoutingNode("node_0", node_0), allocation);
    fail("not allocated on this node");
} catch (IllegalArgumentException ex) {
    // not allocated on that node
}
decision = decider.canRemain(test_2, new RoutingNode("node_1", node_1), allocation);
assertEquals("can stay since allocated on a different path with enough space", Decision.Type.YES, decision.type());
assertThat(((Decision.Single) decision).getExplanation(), containsString("this shard is not allocated on the most utilized disk and can remain"));
// test_3 was deliberately left out of shardRoutingMap, so its data path is unknown
decision = decider.canRemain(test_3, new RoutingNode("node_1", node_1), allocation);
assertEquals("can stay since we don't have information about this shard", Decision.Type.YES, decision.type());
assertThat(((Decision.Single) decision).getExplanation(), containsString("this shard is not allocated on the most utilized disk and can remain"));
}
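For reference, the hand-built ClusterInfo above can also be queried directly. The following is a minimal sketch that reuses the test's locals; getShardSize and the four-map constructor appear in the test itself, while getNodeLeastAvailableDiskUsages and getDataPath are assumed accessor names on this branch.
// Sketch only: reuses clusterInfo, test_0, test_2 and test_3 from the test above.
DiskUsage leastOnNode1 = clusterInfo.getNodeLeastAvailableDiskUsages().get("node_1");
assertEquals(9, leastOnNode1.getFreeBytes()); // 91% used, as configured above
assertEquals(Long.valueOf(10L), clusterInfo.getShardSize(test_0)); // from the shardSizes map
assertEquals("/node1/most", clusterInfo.getDataPath(test_2));
assertNull(clusterInfo.getDataPath(test_3)); // unknown path, so the decider falls back to YES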
Use of org.elasticsearch.cluster.ClusterInfo in project elasticsearch by elastic.
The class TransportClusterAllocationExplainAction, method masterOperation.
@Override
protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state, final ActionListener<ClusterAllocationExplainResponse> listener) {
final RoutingNodes routingNodes = state.getRoutingNodes();
final ClusterInfo clusterInfo = clusterInfoService.getClusterInfo();
final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state, clusterInfo, System.nanoTime(), false);
ShardRouting shardRouting = findShardToExplain(request, allocation);
logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting);
ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, request.includeDiskInfo() ? clusterInfo : null, request.includeYesDecisions(), gatewayAllocator, shardAllocator);
listener.onResponse(new ClusterAllocationExplainResponse(cae));
}
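The action only forwards the ClusterInfo into the response when request.includeDiskInfo() is set, which keeps the explanation small by default. When no live disk statistics are needed at all, a RoutingAllocation can be built the same way with an empty ClusterInfo; a hedged sketch, assuming the ClusterInfo.EMPTY constant exists on this branch:
// Sketch: same wiring as above, but without live disk statistics.
// ClusterInfo.EMPTY is an assumption; an empty ClusterInfo can otherwise be
// assembled from empty ImmutableOpenMap instances.
final RoutingAllocation dryAllocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, ClusterInfo.EMPTY, System.nanoTime(), false);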
Use of org.elasticsearch.cluster.ClusterInfo in project elasticsearch by elastic.
The class ExpectedShardSizeAllocationTests, method testInitializingHasExpectedSize.
public void testInitializingHasExpectedSize() {
final long byteSize = randomIntBetween(0, Integer.MAX_VALUE);
AllocationService strategy = createAllocationService(Settings.EMPTY, new ClusterInfoService() {
    @Override
    public ClusterInfo getClusterInfo() {
        return new ClusterInfo() {
            @Override
            public Long getShardSize(ShardRouting shardRouting) {
                if (shardRouting.getIndexName().equals("test") && shardRouting.shardId().getId() == 0) {
                    return byteSize;
                }
                return null;
            }
        };
    }
    @Override
    public void addListener(Listener listener) {
    }
});
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1))).build();
RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding one node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
clusterState = strategy.reroute(clusterState, "reroute");
assertEquals(1, clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING));
assertEquals(byteSize, clusterState.getRoutingTable().shardsWithState(ShardRoutingState.INITIALIZING).get(0).getExpectedShardSize());
logger.info("Start the primary shard");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
assertEquals(1, clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.STARTED));
assertEquals(1, clusterState.getRoutingNodes().unassigned().size());
logger.info("Add another one node and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
clusterState = strategy.reroute(clusterState, "reroute");
assertEquals(1, clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING));
assertEquals(byteSize, clusterState.getRoutingTable().shardsWithState(ShardRoutingState.INITIALIZING).get(0).getExpectedShardSize());
}
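The anonymous ClusterInfoService above only has to answer getShardSize for the one shard under test. If the same stub is needed in several tests, it can be lifted into a small named class; the sketch below is built only from the interface shape visible in the snippet, and FixedShardSizeClusterInfoService is a hypothetical helper name.
class FixedShardSizeClusterInfoService implements ClusterInfoService {
    private final String indexName;
    private final int shardId;
    private final long size;
    FixedShardSizeClusterInfoService(String indexName, int shardId, long size) {
        this.indexName = indexName;
        this.shardId = shardId;
        this.size = size;
    }
    @Override
    public ClusterInfo getClusterInfo() {
        return new ClusterInfo() {
            @Override
            public Long getShardSize(ShardRouting shardRouting) {
                // fixed size for the shard under test, unknown (null) for everything else
                if (shardRouting.getIndexName().equals(indexName) && shardRouting.shardId().getId() == shardId) {
                    return size;
                }
                return null;
            }
        };
    }
    @Override
    public void addListener(Listener listener) {
        // no-op: the stubbed info never refreshes
    }
}
With that helper, the service in the test above could be created as new FixedShardSizeClusterInfoService("test", 0, byteSize).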
Use of org.elasticsearch.cluster.ClusterInfo in project elasticsearch by elastic.
The class ClusterAllocationExplainIT, method testAllocationFilteringOnIndexCreation.
public void testAllocationFilteringOnIndexCreation() throws Exception {
logger.info("--> starting 2 nodes");
internalCluster().startNodes(2);
logger.info("--> creating an index with 1 primary, 0 replicas, with allocation filtering so the primary can't be assigned");
createIndexAndIndexData(1, 0, Settings.builder().put("index.routing.allocation.include._name", "non_existent_node").build(), ActiveShardCount.NONE);
boolean includeYesDecisions = randomBoolean();
boolean includeDiskInfo = randomBoolean();
ClusterAllocationExplanation explanation = runExplain(true, includeYesDecisions, includeDiskInfo);
ShardId shardId = explanation.getShard();
boolean isPrimary = explanation.isPrimary();
ShardRoutingState shardRoutingState = explanation.getShardState();
DiscoveryNode currentNode = explanation.getCurrentNode();
UnassignedInfo unassignedInfo = explanation.getUnassignedInfo();
ClusterInfo clusterInfo = explanation.getClusterInfo();
AllocateUnassignedDecision allocateDecision = explanation.getShardAllocationDecision().getAllocateDecision();
MoveDecision moveDecision = explanation.getShardAllocationDecision().getMoveDecision();
// verify shard info
assertEquals("idx", shardId.getIndexName());
assertEquals(0, shardId.getId());
assertTrue(isPrimary);
// verify current node info
assertNotEquals(ShardRoutingState.STARTED, shardRoutingState);
assertNull(currentNode);
// verify unassigned info
assertNotNull(unassignedInfo);
assertEquals(Reason.INDEX_CREATED, unassignedInfo.getReason());
assertEquals(AllocationStatus.DECIDERS_NO, unassignedInfo.getLastAllocationStatus());
// verify cluster info
verifyClusterInfo(clusterInfo, includeDiskInfo, 2);
// verify decision objects
assertTrue(allocateDecision.isDecisionTaken());
assertFalse(moveDecision.isDecisionTaken());
assertEquals(AllocationDecision.NO, allocateDecision.getAllocationDecision());
assertEquals("cannot allocate because allocation is not permitted to any of the nodes", allocateDecision.getExplanation());
assertNull(allocateDecision.getAllocationId());
assertNull(allocateDecision.getTargetNode());
assertEquals(0L, allocateDecision.getConfiguredDelayInMillis());
assertEquals(0L, allocateDecision.getRemainingDelayInMillis());
assertEquals(2, allocateDecision.getNodeDecisions().size());
for (NodeAllocationResult result : allocateDecision.getNodeDecisions()) {
    assertNotNull(result.getNode());
    assertEquals(AllocationDecision.NO, result.getNodeDecision());
    if (includeYesDecisions) {
        assertThat(result.getCanAllocateDecision().getDecisions().size(), greaterThan(1));
    } else {
        assertEquals(1, result.getCanAllocateDecision().getDecisions().size());
    }
    for (Decision d : result.getCanAllocateDecision().getDecisions()) {
        if (d.label().equals("filter")) {
            assertEquals(Decision.Type.NO, d.type());
            assertEquals("node does not match index setting [index.routing.allocation.include] filters " + "[_name:\"non_existent_node\"]", d.getExplanation());
        }
    }
}
// verify JSON output
try (XContentParser parser = getParser(explanation)) {
    verifyShardInfo(parser, true, includeDiskInfo, ShardRoutingState.UNASSIGNED);
    parser.nextToken();
    assertEquals("can_allocate", parser.currentName());
    parser.nextToken();
    String allocationDecision = parser.text();
    assertTrue(allocationDecision.equals(AllocationDecision.NO.toString()) || allocationDecision.equals(AllocationDecision.AWAITING_INFO.toString()));
    parser.nextToken();
    assertEquals("allocate_explanation", parser.currentName());
    parser.nextToken();
    if (allocationDecision.equals("awaiting_info")) {
        assertEquals("cannot allocate because information about existing shard data is still being retrieved " + "from some of the nodes", parser.text());
    } else {
        assertEquals("cannot allocate because allocation is not permitted to any of the nodes", parser.text());
    }
    Map<String, AllocationDecision> nodeDecisions = new HashMap<>();
    for (String nodeName : internalCluster().getNodeNames()) {
        nodeDecisions.put(nodeName, AllocationDecision.NO);
    }
    verifyNodeDecisions(parser, nodeDecisions, includeYesDecisions, false);
    assertEquals(Token.END_OBJECT, parser.nextToken());
}
}
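runExplain(...) is a helper of this test class; against a running cluster the same request can also be issued through the admin client. A sketch, with the builder method names assumed from this branch of the client API:
// Sketch: client-side equivalent of runExplain(true, includeYesDecisions, includeDiskInfo).
ClusterAllocationExplanation explanation = client().admin().cluster().prepareAllocationExplain()
    .setIndex("idx").setShard(0).setPrimary(true)
    .setIncludeYesDecisions(includeYesDecisions)
    .setIncludeDiskInfo(includeDiskInfo)
    .get().getExplanation();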
Use of org.elasticsearch.cluster.ClusterInfo in project elasticsearch by elastic.
The class ClusterAllocationExplainIT, method testUnassignedPrimaryWithExistingIndex.
public void testUnassignedPrimaryWithExistingIndex() throws Exception {
logger.info("--> starting 2 nodes");
internalCluster().startNodes(2);
logger.info("--> creating an index with 1 primary, 0 replicas");
createIndexAndIndexData(1, 0);
logger.info("--> stopping the node with the primary");
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName()));
ensureStableCluster(1);
boolean includeYesDecisions = randomBoolean();
boolean includeDiskInfo = randomBoolean();
ClusterAllocationExplanation explanation = runExplain(true, includeYesDecisions, includeDiskInfo);
ShardId shardId = explanation.getShard();
boolean isPrimary = explanation.isPrimary();
ShardRoutingState shardState = explanation.getShardState();
DiscoveryNode currentNode = explanation.getCurrentNode();
UnassignedInfo unassignedInfo = explanation.getUnassignedInfo();
ClusterInfo clusterInfo = explanation.getClusterInfo();
AllocateUnassignedDecision allocateDecision = explanation.getShardAllocationDecision().getAllocateDecision();
MoveDecision moveDecision = explanation.getShardAllocationDecision().getMoveDecision();
// verify shard info
assertEquals("idx", shardId.getIndexName());
assertEquals(0, shardId.getId());
assertTrue(isPrimary);
// verify current node info
assertNotEquals(ShardRoutingState.STARTED, shardState);
assertNull(currentNode);
// verify unassigned info
assertNotNull(unassignedInfo);
assertEquals(Reason.NODE_LEFT, unassignedInfo.getReason());
assertTrue(unassignedInfo.getLastAllocationStatus() == AllocationStatus.FETCHING_SHARD_DATA || unassignedInfo.getLastAllocationStatus() == AllocationStatus.NO_VALID_SHARD_COPY);
// verify cluster info
verifyClusterInfo(clusterInfo, includeDiskInfo, 1);
// verify decision objects
assertTrue(allocateDecision.isDecisionTaken());
assertFalse(moveDecision.isDecisionTaken());
assertTrue(allocateDecision.getAllocationDecision() == AllocationDecision.NO_VALID_SHARD_COPY || allocateDecision.getAllocationDecision() == AllocationDecision.AWAITING_INFO);
if (allocateDecision.getAllocationDecision() == AllocationDecision.NO_VALID_SHARD_COPY) {
    assertEquals("cannot allocate because a previous copy of the primary shard existed but can no longer be " + "found on the nodes in the cluster", allocateDecision.getExplanation());
} else {
    assertEquals("cannot allocate because information about existing shard data is still being retrieved from some of the nodes", allocateDecision.getExplanation());
}
assertNull(allocateDecision.getAllocationId());
assertNull(allocateDecision.getTargetNode());
assertEquals(0L, allocateDecision.getConfiguredDelayInMillis());
assertEquals(0L, allocateDecision.getRemainingDelayInMillis());
if (allocateDecision.getAllocationDecision() == AllocationDecision.NO_VALID_SHARD_COPY) {
    assertEquals(1, allocateDecision.getNodeDecisions().size());
    // verify JSON output
    try (XContentParser parser = getParser(explanation)) {
        verifyShardInfo(parser, true, includeDiskInfo, ShardRoutingState.UNASSIGNED);
        parser.nextToken();
        assertEquals("can_allocate", parser.currentName());
        parser.nextToken();
        assertEquals(AllocationDecision.NO_VALID_SHARD_COPY.toString(), parser.text());
        parser.nextToken();
        assertEquals("allocate_explanation", parser.currentName());
        parser.nextToken();
        assertEquals("cannot allocate because a previous copy of the primary shard existed but can no longer be found " + "on the nodes in the cluster", parser.text());
        verifyStaleShardCopyNodeDecisions(parser, 1, Collections.emptySet());
    }
}
}
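verifyClusterInfo(clusterInfo, includeDiskInfo, 1) is a helper of the test class; conceptually it checks that disk information is only attached to the explanation when it was requested. A hedged sketch of that kind of check, assuming the getNodeLeastAvailableDiskUsages and getNodeMostAvailableDiskUsages accessors on this branch:
// Sketch: the kind of assertion a verifyClusterInfo helper might make.
if (includeDiskInfo) {
    assertNotNull(clusterInfo);
    // only one data node is left after the primary's node was stopped
    assertEquals(1, clusterInfo.getNodeLeastAvailableDiskUsages().size());
    assertEquals(1, clusterInfo.getNodeMostAvailableDiskUsages().size());
} else {
    assertNull(clusterInfo); // disk info was not requested, so none is attached
}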