Use of org.elasticsearch.cluster.routing.ShardRoutingState in project elasticsearch by elastic.
The class CatAllocationTestCase, method testRun.
public void testRun() throws IOException {
    Set<String> nodes = new HashSet<>();
    Map<String, Idx> indices = new HashMap<>();
    try (BufferedReader reader = Files.newBufferedReader(getCatPath(), StandardCharsets.UTF_8)) {
        String line = null;
        // regexp FTW
        Pattern pattern = Pattern.compile("^(.+)\\s+(\\d)\\s+([rp])\\s+(STARTED|RELOCATING|INITIALIZING|UNASSIGNED)"
            + "\\s+\\d+\\s+[0-9.a-z]+\\s+(\\d+\\.\\d+\\.\\d+\\.\\d+).*$");
        while ((line = reader.readLine()) != null) {
            final Matcher matcher;
            if ((matcher = pattern.matcher(line)).matches()) {
                final String index = matcher.group(1);
                Idx idx = indices.get(index);
                if (idx == null) {
                    idx = new Idx(index);
                    indices.put(index, idx);
                }
                final int shard = Integer.parseInt(matcher.group(2));
                final boolean primary = matcher.group(3).equals("p");
                ShardRoutingState state = ShardRoutingState.valueOf(matcher.group(4));
                String ip = matcher.group(5);
                nodes.add(ip);
                ShardRouting routing = TestShardRouting.newShardRouting(index, shard, ip, null, primary, state);
                idx.add(routing);
                logger.debug("Add routing {}", routing);
            } else {
                fail("can't read line: " + line);
            }
        }
    }
    logger.info("Building initial routing table");
    MetaData.Builder builder = MetaData.builder();
    RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
    for (Idx idx : indices.values()) {
        IndexMetaData.Builder idxMetaBuilder = IndexMetaData.builder(idx.name)
            .settings(settings(Version.CURRENT))
            .numberOfShards(idx.numShards())
            .numberOfReplicas(idx.numReplicas());
        for (ShardRouting shardRouting : idx.routing) {
            if (shardRouting.active()) {
                Set<String> allocationIds = idxMetaBuilder.getInSyncAllocationIds(shardRouting.id());
                if (allocationIds == null) {
                    allocationIds = new HashSet<>();
                } else {
                    allocationIds = new HashSet<>(allocationIds);
                }
                allocationIds.add(shardRouting.allocationId().getId());
                idxMetaBuilder.putInSyncAllocationIds(shardRouting.id(), allocationIds);
            }
        }
        IndexMetaData idxMeta = idxMetaBuilder.build();
        builder.put(idxMeta, false);
        IndexRoutingTable.Builder tableBuilder = new IndexRoutingTable.Builder(idxMeta.getIndex()).initializeAsRecovery(idxMeta);
        Map<Integer, IndexShardRoutingTable> shardIdToRouting = new HashMap<>();
        for (ShardRouting r : idx.routing) {
            IndexShardRoutingTable refData = new IndexShardRoutingTable.Builder(r.shardId()).addShard(r).build();
            if (shardIdToRouting.containsKey(r.getId())) {
                refData = new IndexShardRoutingTable.Builder(shardIdToRouting.get(r.getId())).addShard(r).build();
            }
            shardIdToRouting.put(r.getId(), refData);
        }
        for (IndexShardRoutingTable t : shardIdToRouting.values()) {
            tableBuilder.addIndexShard(t);
        }
        IndexRoutingTable table = tableBuilder.build();
        routingTableBuilder.add(table);
    }
    MetaData metaData = builder.build();
    RoutingTable routingTable = routingTableBuilder.build();
    DiscoveryNodes.Builder builderDiscoNodes = DiscoveryNodes.builder();
    for (String node : nodes) {
        builderDiscoNodes.add(newNode(node));
    }
    ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
        .metaData(metaData)
        .routingTable(routingTable)
        .nodes(builderDiscoNodes.build())
        .build();
    if (balanceFirst()) {
        clusterState = rebalance(clusterState);
    }
    clusterState = allocateNew(clusterState);
}
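For reference, here is a minimal sketch of the kind of cat-shards line the regular expression above is written for. The index name, document count, store size, and IP are made-up sample values, and the snippet reuses the Pattern compiled in the test above; only the state column ends up as a ShardRoutingState.

// Made-up sample line: index, shard number, p/r flag, state, doc count, store size, node IP.
String sample = "test_idx 0 p STARTED 12345 1.1gb 192.168.1.1";
Matcher m = pattern.matcher(sample);
if (m.matches()) {
    // group(4) is one of STARTED, RELOCATING, INITIALIZING or UNASSIGNED,
    // which maps directly onto the ShardRoutingState enum constants.
    ShardRoutingState state = ShardRoutingState.valueOf(m.group(4)); // STARTED
}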
Use of org.elasticsearch.cluster.routing.ShardRoutingState in project elasticsearch by elastic.
The class IndicesClusterStateServiceRandomUpdatesTests, method testJoiningNewClusterOnlyRemovesInMemoryIndexStructures.
/**
* This test ensures that when a node joins a brand new cluster (different cluster UUID),
* different from the cluster it was previously a part of, the in-memory index data structures
* are all removed but the on disk contents of those indices remain so that they can later be
* imported as dangling indices. Normally, the first cluster state update that the node
* receives from the new cluster would contain a cluster block that would cause all in-memory
* structures to be removed (see {@link IndicesClusterStateService#applyClusterState(ClusterChangedEvent)}),
* but in the case where the node joined and was a few cluster state updates behind, it would
* not have received the cluster block, in which case we still need to remove the in-memory
* structures while ensuring the data remains on disk. This test executes this particular
* scenario.
*/
public void testJoiningNewClusterOnlyRemovesInMemoryIndexStructures() {
    // a cluster state derived from the initial state that includes a created index
    String name = "index_" + randomAsciiOfLength(8).toLowerCase(Locale.ROOT);
    ShardRoutingState[] replicaStates = new ShardRoutingState[randomIntBetween(0, 3)];
    Arrays.fill(replicaStates, ShardRoutingState.INITIALIZING);
    ClusterState stateWithIndex = ClusterStateCreationUtils.state(name, randomBoolean(), ShardRoutingState.INITIALIZING, replicaStates);
    // the initial state, which is derived from the newly created cluster state but doesn't contain the index
    ClusterState initialState = ClusterState.builder(stateWithIndex)
        .metaData(MetaData.builder(stateWithIndex.metaData()).remove(name))
        .routingTable(RoutingTable.builder().build())
        .build();
    // pick a data node that has shards assigned to it, on which to simulate the add-index cluster state change event
    DiscoveryNode node = stateWithIndex.nodes().get(
        randomFrom(stateWithIndex.routingTable().index(name).shardsWithState(INITIALIZING)).currentNodeId());
    // simulate the cluster state change on the node
    ClusterState localState = adaptClusterStateToLocalNode(stateWithIndex, node);
    ClusterState previousLocalState = adaptClusterStateToLocalNode(initialState, node);
    IndicesClusterStateService indicesCSSvc = createIndicesClusterStateService(node, RecordingIndicesService::new);
    indicesCSSvc.start();
    indicesCSSvc.applyClusterState(new ClusterChangedEvent("cluster state change that adds the index", localState, previousLocalState));
    // create a new empty cluster state with a brand new cluster UUID
    ClusterState newClusterState = ClusterState.builder(initialState)
        .metaData(MetaData.builder(initialState.metaData()).clusterUUID(UUIDs.randomBase64UUID()))
        .build();
    // simulate the cluster state change on the node
    localState = adaptClusterStateToLocalNode(newClusterState, node);
    previousLocalState = adaptClusterStateToLocalNode(stateWithIndex, node);
    indicesCSSvc.applyClusterState(new ClusterChangedEvent("cluster state change with a new cluster UUID (and doesn't contain the index)", localState, previousLocalState));
    // check that in-memory data structures have been removed once the new cluster state is applied,
    // but the persistent data is still there
    RecordingIndicesService indicesService = (RecordingIndicesService) indicesCSSvc.indicesService;
    for (IndexMetaData indexMetaData : stateWithIndex.metaData()) {
        Index index = indexMetaData.getIndex();
        assertNull(indicesService.indexService(index));
        assertFalse(indicesService.isDeleted(index));
    }
}
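A small follow-up sketch, not part of the original test, that makes the scenario from the javadoc explicit: the two states carry different cluster UUIDs, and that difference is what drives the in-memory cleanup. It assumes the MetaData#clusterUUID() getter and the variables built in the test above.

// Hedged sketch: newClusterState was built with UUIDs.randomBase64UUID(), so its
// cluster UUID should differ from the one of the cluster the node originally joined.
assertNotEquals(stateWithIndex.metaData().clusterUUID(),
    newClusterState.metaData().clusterUUID());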
Use of org.elasticsearch.cluster.routing.ShardRoutingState in project elasticsearch by elastic.
The class ClusterAllocationExplainIT, method testAllocationFilteringOnIndexCreation.
public void testAllocationFilteringOnIndexCreation() throws Exception {
    logger.info("--> starting 2 nodes");
    internalCluster().startNodes(2);
    logger.info("--> creating an index with 1 primary, 0 replicas, with allocation filtering so the primary can't be assigned");
    createIndexAndIndexData(1, 0,
        Settings.builder().put("index.routing.allocation.include._name", "non_existent_node").build(),
        ActiveShardCount.NONE);
    boolean includeYesDecisions = randomBoolean();
    boolean includeDiskInfo = randomBoolean();
    ClusterAllocationExplanation explanation = runExplain(true, includeYesDecisions, includeDiskInfo);
    ShardId shardId = explanation.getShard();
    boolean isPrimary = explanation.isPrimary();
    ShardRoutingState shardRoutingState = explanation.getShardState();
    DiscoveryNode currentNode = explanation.getCurrentNode();
    UnassignedInfo unassignedInfo = explanation.getUnassignedInfo();
    ClusterInfo clusterInfo = explanation.getClusterInfo();
    AllocateUnassignedDecision allocateDecision = explanation.getShardAllocationDecision().getAllocateDecision();
    MoveDecision moveDecision = explanation.getShardAllocationDecision().getMoveDecision();
    // verify shard info
    assertEquals("idx", shardId.getIndexName());
    assertEquals(0, shardId.getId());
    assertTrue(isPrimary);
    // verify current node info
    assertNotEquals(ShardRoutingState.STARTED, shardRoutingState);
    assertNull(currentNode);
    // verify unassigned info
    assertNotNull(unassignedInfo);
    assertEquals(Reason.INDEX_CREATED, unassignedInfo.getReason());
    assertEquals(AllocationStatus.DECIDERS_NO, unassignedInfo.getLastAllocationStatus());
    // verify cluster info
    verifyClusterInfo(clusterInfo, includeDiskInfo, 2);
    // verify decision objects
    assertTrue(allocateDecision.isDecisionTaken());
    assertFalse(moveDecision.isDecisionTaken());
    assertEquals(AllocationDecision.NO, allocateDecision.getAllocationDecision());
    assertEquals("cannot allocate because allocation is not permitted to any of the nodes", allocateDecision.getExplanation());
    assertNull(allocateDecision.getAllocationId());
    assertNull(allocateDecision.getTargetNode());
    assertEquals(0L, allocateDecision.getConfiguredDelayInMillis());
    assertEquals(0L, allocateDecision.getRemainingDelayInMillis());
    assertEquals(2, allocateDecision.getNodeDecisions().size());
    for (NodeAllocationResult result : allocateDecision.getNodeDecisions()) {
        assertNotNull(result.getNode());
        assertEquals(AllocationDecision.NO, result.getNodeDecision());
        if (includeYesDecisions) {
            assertThat(result.getCanAllocateDecision().getDecisions().size(), greaterThan(1));
        } else {
            assertEquals(1, result.getCanAllocateDecision().getDecisions().size());
        }
        for (Decision d : result.getCanAllocateDecision().getDecisions()) {
            if (d.label().equals("filter")) {
                assertEquals(Decision.Type.NO, d.type());
                assertEquals("node does not match index setting [index.routing.allocation.include] filters "
                    + "[_name:\"non_existent_node\"]", d.getExplanation());
            }
        }
    }
    // verify JSON output
    try (XContentParser parser = getParser(explanation)) {
        verifyShardInfo(parser, true, includeDiskInfo, ShardRoutingState.UNASSIGNED);
        parser.nextToken();
        assertEquals("can_allocate", parser.currentName());
        parser.nextToken();
        String allocationDecision = parser.text();
        assertTrue(allocationDecision.equals(AllocationDecision.NO.toString())
            || allocationDecision.equals(AllocationDecision.AWAITING_INFO.toString()));
        parser.nextToken();
        assertEquals("allocate_explanation", parser.currentName());
        parser.nextToken();
        if (allocationDecision.equals("awaiting_info")) {
            assertEquals("cannot allocate because information about existing shard data is still being retrieved "
                + "from some of the nodes", parser.text());
        } else {
            assertEquals("cannot allocate because allocation is not permitted to any of the nodes", parser.text());
        }
        Map<String, AllocationDecision> nodeDecisions = new HashMap<>();
        for (String nodeName : internalCluster().getNodeNames()) {
            nodeDecisions.put(nodeName, AllocationDecision.NO);
        }
        verifyNodeDecisions(parser, nodeDecisions, includeYesDecisions, false);
        assertEquals(Token.END_OBJECT, parser.nextToken());
    }
}
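The runExplain(...) helper is not shown in these snippets. A plausible minimal sketch of what such a call could look like with the cluster-allocation-explain client API is below; the index name "idx" and shard 0 match the assertions above, while everything else is an assumption rather than the helper's actual implementation.

// Sketch only: explain the unassigned primary of shard idx/0.
ClusterAllocationExplanation explanation = client().admin().cluster()
    .prepareAllocationExplain()
    .setIndex("idx")
    .setShard(0)
    .setPrimary(true)
    .setIncludeYesDecisions(includeYesDecisions)
    .setIncludeDiskInfo(includeDiskInfo)
    .get()
    .getExplanation();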
Use of org.elasticsearch.cluster.routing.ShardRoutingState in project elasticsearch by elastic.
The class ClusterAllocationExplainIT, method testUnassignedPrimaryWithExistingIndex.
public void testUnassignedPrimaryWithExistingIndex() throws Exception {
    logger.info("--> starting 2 nodes");
    internalCluster().startNodes(2);
    logger.info("--> creating an index with 1 primary, 0 replicas");
    createIndexAndIndexData(1, 0);
    logger.info("--> stopping the node with the primary");
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName()));
    ensureStableCluster(1);
    boolean includeYesDecisions = randomBoolean();
    boolean includeDiskInfo = randomBoolean();
    ClusterAllocationExplanation explanation = runExplain(true, includeYesDecisions, includeDiskInfo);
    ShardId shardId = explanation.getShard();
    boolean isPrimary = explanation.isPrimary();
    ShardRoutingState shardState = explanation.getShardState();
    DiscoveryNode currentNode = explanation.getCurrentNode();
    UnassignedInfo unassignedInfo = explanation.getUnassignedInfo();
    ClusterInfo clusterInfo = explanation.getClusterInfo();
    AllocateUnassignedDecision allocateDecision = explanation.getShardAllocationDecision().getAllocateDecision();
    MoveDecision moveDecision = explanation.getShardAllocationDecision().getMoveDecision();
    // verify shard info
    assertEquals("idx", shardId.getIndexName());
    assertEquals(0, shardId.getId());
    assertTrue(isPrimary);
    // verify current node info
    assertNotEquals(ShardRoutingState.STARTED, shardState);
    assertNull(currentNode);
    // verify unassigned info
    assertNotNull(unassignedInfo);
    assertEquals(Reason.NODE_LEFT, unassignedInfo.getReason());
    assertTrue(unassignedInfo.getLastAllocationStatus() == AllocationStatus.FETCHING_SHARD_DATA
        || unassignedInfo.getLastAllocationStatus() == AllocationStatus.NO_VALID_SHARD_COPY);
    // verify cluster info
    verifyClusterInfo(clusterInfo, includeDiskInfo, 1);
    // verify decision objects
    assertTrue(allocateDecision.isDecisionTaken());
    assertFalse(moveDecision.isDecisionTaken());
    assertTrue(allocateDecision.getAllocationDecision() == AllocationDecision.NO_VALID_SHARD_COPY
        || allocateDecision.getAllocationDecision() == AllocationDecision.AWAITING_INFO);
    if (allocateDecision.getAllocationDecision() == AllocationDecision.NO_VALID_SHARD_COPY) {
        assertEquals("cannot allocate because a previous copy of the primary shard existed but can no longer be "
            + "found on the nodes in the cluster", allocateDecision.getExplanation());
    } else {
        assertEquals("cannot allocate because information about existing shard data is still being retrieved from some of the nodes",
            allocateDecision.getExplanation());
    }
    assertNull(allocateDecision.getAllocationId());
    assertNull(allocateDecision.getTargetNode());
    assertEquals(0L, allocateDecision.getConfiguredDelayInMillis());
    assertEquals(0L, allocateDecision.getRemainingDelayInMillis());
    if (allocateDecision.getAllocationDecision() == AllocationDecision.NO_VALID_SHARD_COPY) {
        assertEquals(1, allocateDecision.getNodeDecisions().size());
        // verify JSON output
        try (XContentParser parser = getParser(explanation)) {
            verifyShardInfo(parser, true, includeDiskInfo, ShardRoutingState.UNASSIGNED);
            parser.nextToken();
            assertEquals("can_allocate", parser.currentName());
            parser.nextToken();
            assertEquals(AllocationDecision.NO_VALID_SHARD_COPY.toString(), parser.text());
            parser.nextToken();
            assertEquals("allocate_explanation", parser.currentName());
            parser.nextToken();
            assertEquals("cannot allocate because a previous copy of the primary shard existed but can no longer be found "
                + "on the nodes in the cluster", parser.text());
            verifyStaleShardCopyNodeDecisions(parser, 1, Collections.emptySet());
        }
    }
}
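Since the node holding the only copy of the primary was stopped, the explained shard cannot be anything but unassigned. A stricter assertion along these lines would also be expected to hold; this is a sketch, not part of the original test.

// The JSON branch above already verifies ShardRoutingState.UNASSIGNED, so the typed
// accessors should agree regardless of which allocation decision was reached.
assertEquals(ShardRoutingState.UNASSIGNED, explanation.getShardState());
assertNull(explanation.getCurrentNode());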
Use of org.elasticsearch.cluster.routing.ShardRoutingState in project elasticsearch by elastic.
The class ClusterAllocationExplainIT, method testWorseBalance.
public void testWorseBalance() throws Exception {
    logger.info("--> starting a single node");
    internalCluster().startNode();
    ensureStableCluster(1);
    logger.info("--> creating an index with 5 shards, all allocated to the single node");
    createIndexAndIndexData(5, 0);
    logger.info("--> setting balancing threshold really high, so it won't be met");
    client().admin().cluster().prepareUpdateSettings()
        .setTransientSettings(Settings.builder().put("cluster.routing.allocation.balance.threshold", 1000.0f))
        .get();
    logger.info("--> starting another node, with the rebalance threshold so high, it should not get any shards");
    internalCluster().startNode();
    ensureStableCluster(2);
    boolean includeYesDecisions = randomBoolean();
    boolean includeDiskInfo = randomBoolean();
    ClusterAllocationExplanation explanation = runExplain(true, includeYesDecisions, includeDiskInfo);
    ShardId shardId = explanation.getShard();
    boolean isPrimary = explanation.isPrimary();
    ShardRoutingState shardRoutingState = explanation.getShardState();
    DiscoveryNode currentNode = explanation.getCurrentNode();
    UnassignedInfo unassignedInfo = explanation.getUnassignedInfo();
    ClusterInfo clusterInfo = explanation.getClusterInfo();
    AllocateUnassignedDecision allocateDecision = explanation.getShardAllocationDecision().getAllocateDecision();
    MoveDecision moveDecision = explanation.getShardAllocationDecision().getMoveDecision();
    // verify shard info
    assertEquals("idx", shardId.getIndexName());
    assertEquals(0, shardId.getId());
    assertTrue(isPrimary);
    // verify current node info
    assertEquals(ShardRoutingState.STARTED, shardRoutingState);
    assertNotNull(currentNode);
    // verify unassigned info
    assertNull(unassignedInfo);
    // verify cluster info
    verifyClusterInfo(clusterInfo, includeDiskInfo, 2);
    // verify decision object
    assertFalse(allocateDecision.isDecisionTaken());
    assertTrue(moveDecision.isDecisionTaken());
    assertEquals(AllocationDecision.NO, moveDecision.getAllocationDecision());
    assertEquals("cannot rebalance as no target node exists that can both allocate this shard and improve the cluster balance",
        moveDecision.getExplanation());
    assertTrue(moveDecision.canRemain());
    assertFalse(moveDecision.forceMove());
    assertTrue(moveDecision.canRebalanceCluster());
    assertNotNull(moveDecision.getCanRemainDecision());
    assertNull(moveDecision.getTargetNode());
    assertEquals(1, moveDecision.getCurrentNodeRanking());
    // verifying cluster rebalance decision object
    assertNotNull(moveDecision.getClusterRebalanceDecision());
    assertEquals(Decision.Type.YES, moveDecision.getClusterRebalanceDecision().type());
    for (Decision d : moveDecision.getClusterRebalanceDecision().getDecisions()) {
        assertEquals(Decision.Type.YES, d.type());
        assertNotNull(d.getExplanation());
    }
    // verify node decisions
    assertEquals(1, moveDecision.getNodeDecisions().size());
    NodeAllocationResult result = moveDecision.getNodeDecisions().get(0);
    assertNotNull(result.getNode());
    assertEquals(1, result.getWeightRanking());
    assertEquals(AllocationDecision.WORSE_BALANCE, result.getNodeDecision());
    if (includeYesDecisions) {
        assertThat(result.getCanAllocateDecision().getDecisions().size(), greaterThan(0));
    } else {
        assertEquals(0, result.getCanAllocateDecision().getDecisions().size());
    }
    for (Decision d : result.getCanAllocateDecision().getDecisions()) {
        assertEquals(Decision.Type.YES, d.type());
        assertNotNull(d.getExplanation());
    }
    // verify JSON output
    try (XContentParser parser = getParser(explanation)) {
        verifyShardInfo(parser, true, includeDiskInfo, ShardRoutingState.STARTED);
        parser.nextToken();
        assertEquals("can_remain_on_current_node", parser.currentName());
        parser.nextToken();
        assertEquals(AllocationDecision.YES.toString(), parser.text());
        parser.nextToken();
        assertEquals("can_rebalance_cluster", parser.currentName());
        parser.nextToken();
        assertEquals(AllocationDecision.YES.toString(), parser.text());
        parser.nextToken();
        assertEquals("can_rebalance_to_other_node", parser.currentName());
        parser.nextToken();
        assertEquals(AllocationDecision.NO.toString(), parser.text());
        parser.nextToken();
        assertEquals("rebalance_explanation", parser.currentName());
        parser.nextToken();
        assertEquals("cannot rebalance as no target node exists that can both allocate this shard and improve the cluster balance",
            parser.text());
        verifyNodeDecisions(parser, allNodeDecisions(AllocationDecision.WORSE_BALANCE, true), includeYesDecisions, false);
        assertEquals(Token.END_OBJECT, parser.nextToken());
    }
}
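The transient balance threshold set above stays in the cluster settings until it is cleared; whether the test class resets it elsewhere is not visible in this snippet. A hedged cleanup sketch using the same update-settings API would look like this.

// Remove the transient override so the default cluster.routing.allocation.balance.threshold applies again.
client().admin().cluster().prepareUpdateSettings()
    .setTransientSettings(Settings.builder().putNull("cluster.routing.allocation.balance.threshold"))
    .get();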