Example 91 with DiscoveryNode

Use of org.elasticsearch.cluster.node.DiscoveryNode in project elasticsearch by elastic.

From the class TransportClientNodesServiceTests, method checkRemoveAddress.

private void checkRemoveAddress(boolean sniff) {
    Object[] extraSettings = { TransportClient.CLIENT_TRANSPORT_SNIFF.getKey(), sniff };
    try (TestIteration iteration = new TestIteration(extraSettings)) {
        final TransportClientNodesService service = iteration.transportClientNodesService;
        assertEquals(iteration.listNodesCount + iteration.sniffNodesCount, service.connectedNodes().size());
        final TransportAddress addressToRemove = randomFrom(iteration.listNodeAddresses);
        service.removeTransportAddress(addressToRemove);
        assertThat(service.connectedNodes(), everyItem(not(new CustomMatcher<DiscoveryNode>("removed address") {

            @Override
            public boolean matches(Object item) {
                return item instanceof DiscoveryNode && ((DiscoveryNode) item).getAddress().equals(addressToRemove);
            }
        })));
        assertEquals(iteration.listNodesCount + iteration.sniffNodesCount - 1, service.connectedNodes().size());
    }
}
Also used: DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode), TransportAddress (org.elasticsearch.common.transport.TransportAddress)
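
The anonymous CustomMatcher above can be pulled out into a reusable helper when several tests need to assert on node addresses. A minimal sketch (not part of the original test; the helper name is invented here, and org.hamcrest.Matcher plus the imports listed above are assumed):

static Matcher<DiscoveryNode> hasTransportAddress(final TransportAddress expected) {
    // matches any DiscoveryNode whose published transport address equals the expected one
    return new CustomMatcher<DiscoveryNode>("node with address " + expected) {

        @Override
        public boolean matches(Object item) {
            return item instanceof DiscoveryNode && ((DiscoveryNode) item).getAddress().equals(expected);
        }
    };
}

With that helper, the assertion above reads assertThat(service.connectedNodes(), everyItem(not(hasTransportAddress(addressToRemove)))).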

Example 92 with DiscoveryNode

Use of org.elasticsearch.cluster.node.DiscoveryNode in project elasticsearch by elastic.

From the class ClusterChangedEventTests, method createDiscoveryNodes.

// Create the discovery nodes for a cluster state.  For our testing purposes, we want
// the first to be master, the second to be master eligible, the third to be a data node,
// and the remainder can be any kinds of nodes (master eligible, data, or both).
private static DiscoveryNodes createDiscoveryNodes(final int numNodes, final boolean isLocalMaster) {
    assert (numNodes >= 3) : "the initial cluster state for event change tests should have a minimum of 3 nodes " + "so there are a minimum of 2 master nodes for testing master change events.";
    final DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
    // randomly assign the local node if not master
    final int localNodeIndex = isLocalMaster ? 0 : randomIntBetween(1, numNodes - 1);
    for (int i = 0; i < numNodes; i++) {
        final String nodeId = NODE_ID_PREFIX + i;
        Set<DiscoveryNode.Role> roles = new HashSet<>();
        if (i == 0) {
            // the master node
            builder.masterNodeId(nodeId);
            roles.add(DiscoveryNode.Role.MASTER);
        } else if (i == 1) {
            // the alternate master node
            roles.add(DiscoveryNode.Role.MASTER);
        } else if (i == 2) {
            // we need at least one data node
            roles.add(DiscoveryNode.Role.DATA);
        } else {
            // remaining nodes can have any roles (though none of them will be the elected master)
            if (randomBoolean()) {
                roles.add(DiscoveryNode.Role.MASTER);
            }
            if (randomBoolean()) {
                roles.add(DiscoveryNode.Role.DATA);
            }
        }
        final DiscoveryNode node = newNode(nodeId, roles);
        builder.add(node);
        if (i == localNodeIndex) {
            builder.localNodeId(nodeId);
        }
    }
    return builder.build();
}
Also used: DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode), DiscoveryNodes (org.elasticsearch.cluster.node.DiscoveryNodes), HashSet (java.util.HashSet)
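
The newNode(nodeId, roles) helper is not shown in this excerpt. A plausible sketch, assuming the five-argument DiscoveryNode constructor used in Example 94 below and the buildNewFakeTransportAddress() helper from the test base class (the real helper in ClusterChangedEventTests may differ):

// hypothetical helper: builds a node with the given id and roles on a fake transport address
private static DiscoveryNode newNode(final String nodeId, final Set<DiscoveryNode.Role> roles) {
    return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), Collections.emptyMap(), roles, Version.CURRENT);
}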

Example 93 with DiscoveryNode

Use of org.elasticsearch.cluster.node.DiscoveryNode in project elasticsearch by elastic.

From the class ThrottlingAllocationTests, method createRecoveryStateAndInitalizeAllocations.

private ClusterState createRecoveryStateAndInitalizeAllocations(MetaData metaData, TestGatewayAllocator gatewayAllocator) {
    DiscoveryNode node1 = newNode("node1");
    MetaData.Builder metaDataBuilder = new MetaData.Builder(metaData);
    RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
    for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
        Index index = cursor.value.getIndex();
        IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(cursor.value);
        final int recoveryType = randomInt(5);
        if (recoveryType <= 4) {
            addInSyncAllocationIds(index, indexMetaDataBuilder, gatewayAllocator, node1);
        }
        IndexMetaData indexMetaData = indexMetaDataBuilder.build();
        metaDataBuilder.put(indexMetaData, false);
        switch(recoveryType) {
            case 0:
                routingTableBuilder.addAsRecovery(indexMetaData);
                break;
            case 1:
                routingTableBuilder.addAsFromCloseToOpen(indexMetaData);
                break;
            case 2:
                routingTableBuilder.addAsFromDangling(indexMetaData);
                break;
            case 3:
                routingTableBuilder.addAsNewRestore(indexMetaData,
                    new SnapshotRecoverySource(new Snapshot("repo", new SnapshotId("snap", "randomId")),
                        Version.CURRENT, indexMetaData.getIndex().getName()),
                    new IntHashSet());
                break;
            case 4:
                routingTableBuilder.addAsRestore(indexMetaData,
                    new SnapshotRecoverySource(new Snapshot("repo", new SnapshotId("snap", "randomId")),
                        Version.CURRENT, indexMetaData.getIndex().getName()));
                break;
            case 5:
                routingTableBuilder.addAsNew(indexMetaData);
                break;
            default:
                throw new IndexOutOfBoundsException();
        }
    }
    return ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
        .nodes(DiscoveryNodes.builder().add(node1))
        .metaData(metaDataBuilder.build())
        .routingTable(routingTableBuilder.build())
        .build();
}
Also used: DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode), IntHashSet (com.carrotsearch.hppc.IntHashSet), Index (org.elasticsearch.index.Index), IndexMetaData (org.elasticsearch.cluster.metadata.IndexMetaData), Snapshot (org.elasticsearch.snapshots.Snapshot), SnapshotId (org.elasticsearch.snapshots.SnapshotId), SnapshotRecoverySource (org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource), RoutingTable (org.elasticsearch.cluster.routing.RoutingTable), MetaData (org.elasticsearch.cluster.metadata.MetaData)
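
A usage sketch (assumed, not part of the original test): the cluster state produced here is then passed through an AllocationService reroute, as in Example 94, so the randomly chosen recovery sources start initializing and the throttling deciders are exercised. Here allocationService stands in for whatever AllocationService the test builds:

TestGatewayAllocator gatewayAllocator = new TestGatewayAllocator();
ClusterState clusterState = createRecoveryStateAndInitalizeAllocations(metaData, gatewayAllocator);
ClusterState rerouted = allocationService.reroute(clusterState, "reroute");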

Example 94 with DiscoveryNode

Use of org.elasticsearch.cluster.node.DiscoveryNode in project elasticsearch by elastic.

From the class DiskThresholdDeciderTests, method testCanRemainWithShardRelocatingAway.

public void testCanRemainWithShardRelocatingAway() {
    Settings diskSettings = Settings.builder()
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true)
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%")
        .build();
    // We have an index with 2 primary shards each taking 40 bytes. Each node has 100 bytes available
    ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder();
    // 80% used
    usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 20));
    // 0% used
    usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 100));
    ImmutableOpenMap<String, DiskUsage> usages = usagesBuilder.build();
    ImmutableOpenMap.Builder<String, Long> shardSizesBuilder = ImmutableOpenMap.builder();
    shardSizesBuilder.put("[test][0][p]", 40L);
    shardSizesBuilder.put("[test][1][p]", 40L);
    shardSizesBuilder.put("[foo][0][p]", 10L);
    ImmutableOpenMap<String, Long> shardSizes = shardSizesBuilder.build();
    final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes);
    DiskThresholdDecider diskThresholdDecider = makeDecider(diskSettings);
    MetaData metaData = MetaData.builder()
        .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0))
        .put(IndexMetaData.builder("foo").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
        .build();
    RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("foo")).build();
    DiscoveryNode discoveryNode1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT);
    DiscoveryNode discoveryNode2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT);
    DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build();
    ClusterState baseClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).nodes(discoveryNodes).build();
    // The two shards together consume 80% of the disk while the high watermark allows only 70%, so shard 0 is not allowed to remain here
    ShardRouting firstRouting = TestShardRouting.newShardRouting("test", 0, "node1", null, true, ShardRoutingState.STARTED);
    ShardRouting secondRouting = TestShardRouting.newShardRouting("test", 1, "node1", null, true, ShardRoutingState.STARTED);
    RoutingNode firstRoutingNode = new RoutingNode("node1", discoveryNode1, firstRouting, secondRouting);
    RoutingTable.Builder builder = RoutingTable.builder().add(
        IndexRoutingTable.builder(firstRouting.index())
            .addIndexShard(new IndexShardRoutingTable.Builder(firstRouting.shardId()).addShard(firstRouting).build())
            .addIndexShard(new IndexShardRoutingTable.Builder(secondRouting.shardId()).addShard(secondRouting).build()));
    ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
    RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime(), false);
    routingAllocation.debugDecision(true);
    Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
    assertThat(decision.type(), equalTo(Decision.Type.NO));
    assertThat(((Decision.Single) decision).getExplanation(), containsString("the shard cannot remain on this node because it is above the high watermark cluster setting " + "[cluster.routing.allocation.disk.watermark.high=70%] and there is less than the required [30.0%] free disk on node, " + "actual free: [20.0%]"));
    // The two shards together consume 80% of the disk while only 70% is allowed, but shard 1 is relocating away, so shard 0 can stay
    firstRouting = TestShardRouting.newShardRouting("test", 0, "node1", null, true, ShardRoutingState.STARTED);
    secondRouting = TestShardRouting.newShardRouting("test", 1, "node1", "node2", true, ShardRoutingState.RELOCATING);
    ShardRouting fooRouting = TestShardRouting.newShardRouting("foo", 0, "node1", null, true, ShardRoutingState.UNASSIGNED);
    firstRoutingNode = new RoutingNode("node1", discoveryNode1, firstRouting, secondRouting);
    builder = RoutingTable.builder().add(
        IndexRoutingTable.builder(firstRouting.index())
            .addIndexShard(new IndexShardRoutingTable.Builder(firstRouting.shardId()).addShard(firstRouting).build())
            .addIndexShard(new IndexShardRoutingTable.Builder(secondRouting.shardId()).addShard(secondRouting).build()));
    clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
    routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), clusterState, clusterInfo, System.nanoTime(), false);
    routingAllocation.debugDecision(true);
    decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
    assertThat(decision.type(), equalTo(Decision.Type.YES));
    assertEquals("there is enough disk on this node for the shard to remain, free: [60b]", ((Decision.Single) decision).getExplanation());
    decision = diskThresholdDecider.canAllocate(fooRouting, firstRoutingNode, routingAllocation);
    assertThat(decision.type(), equalTo(Decision.Type.NO));
    if (fooRouting.recoverySource().getType() == RecoverySource.Type.EMPTY_STORE) {
        assertThat(((Decision.Single) decision).getExplanation(), containsString("the node is above the high watermark cluster setting [cluster.routing.allocation.disk.watermark.high=70%], using " + "more disk space than the maximum allowed [70.0%], actual free: [20.0%]"));
    } else {
        assertThat(((Decision.Single) decision).getExplanation(), containsString("the node is above the low watermark cluster setting [cluster.routing.allocation.disk.watermark.low=60%], using more " + "disk space than the maximum allowed [60.0%], actual free: [20.0%]"));
    }
    // Creating AllocationService instance and the services it depends on...
    ClusterInfoService cis = new ClusterInfoService() {

        @Override
        public ClusterInfo getClusterInfo() {
            logger.info("--> calling fake getClusterInfo");
            return clusterInfo;
        }

        @Override
        public void addListener(Listener listener) {
        // noop
        }
    };
    AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList(
        new SameShardAllocationDecider(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
        diskThresholdDecider)));
    AllocationService strategy = new AllocationService(Settings.builder()
        .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
        .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
        .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
        .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis);
    // Ensure that the reroute call doesn't alter the routing table: one of the primaries (shard 1) is relocating away,
    // and therefore we will have sufficient disk space on node1.
    ClusterState result = strategy.reroute(clusterState, "reroute");
    assertThat(result, equalTo(clusterState));
    assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().state(), equalTo(STARTED));
    assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().currentNodeId(), equalTo("node1"));
    assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().relocatingNodeId(), nullValue());
    assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().state(), equalTo(RELOCATING));
    assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().currentNodeId(), equalTo("node1"));
    assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().relocatingNodeId(), equalTo("node2"));
}
Also used: IndexShardRoutingTable (org.elasticsearch.cluster.routing.IndexShardRoutingTable), DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode), ClusterSettings (org.elasticsearch.common.settings.ClusterSettings), RoutingNodes (org.elasticsearch.cluster.routing.RoutingNodes), DevNullClusterInfo (org.elasticsearch.cluster.MockInternalClusterInfoService.DevNullClusterInfo), Matchers.containsString (org.hamcrest.Matchers.containsString), DiskUsage (org.elasticsearch.cluster.DiskUsage), ImmutableOpenMap (org.elasticsearch.common.collect.ImmutableOpenMap), RoutingNode (org.elasticsearch.cluster.routing.RoutingNode), MetaData (org.elasticsearch.cluster.metadata.MetaData), IndexMetaData (org.elasticsearch.cluster.metadata.IndexMetaData), DiskThresholdSettings (org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings), Settings (org.elasticsearch.common.settings.Settings), DiscoveryNodes (org.elasticsearch.cluster.node.DiscoveryNodes), AllocationService (org.elasticsearch.cluster.routing.allocation.AllocationService), TestGatewayAllocator (org.elasticsearch.test.gateway.TestGatewayAllocator), ClusterState (org.elasticsearch.cluster.ClusterState), ClusterInfoService (org.elasticsearch.cluster.ClusterInfoService), BalancedShardsAllocator (org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator), ClusterInfo (org.elasticsearch.cluster.ClusterInfo), IndexRoutingTable (org.elasticsearch.cluster.routing.IndexRoutingTable), RoutingTable (org.elasticsearch.cluster.routing.RoutingTable), ShardRouting (org.elasticsearch.cluster.routing.ShardRouting), TestShardRouting (org.elasticsearch.cluster.routing.TestShardRouting), RoutingAllocation (org.elasticsearch.cluster.routing.allocation.RoutingAllocation)
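
For reference, the same disk thresholds can be written with the raw setting keys that appear in the decision explanations above; this is a minimal sketch, and the threshold_enabled and include_relocations keys are assumed to be the ones backing the constants used in the test:

Settings diskSettings = Settings.builder()
    // enable the disk threshold decider and account for relocating shards (assumed key names)
    .put("cluster.routing.allocation.disk.threshold_enabled", true)
    .put("cluster.routing.allocation.disk.include_relocations", true)
    // watermarks quoted verbatim in the NO decisions above
    .put("cluster.routing.allocation.disk.watermark.low", "60%")
    .put("cluster.routing.allocation.disk.watermark.high", "70%")
    .build();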

Example 95 with DiscoveryNode

Use of org.elasticsearch.cluster.node.DiscoveryNode in project elasticsearch by elastic.

From the class FilterAllocationDeciderTests, method testFilterInitialRecovery.

public void testFilterInitialRecovery() {
    ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    FilterAllocationDecider filterAllocationDecider = new FilterAllocationDecider(Settings.EMPTY, clusterSettings);
    AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, Arrays.asList(
        filterAllocationDecider,
        new SameShardAllocationDecider(Settings.EMPTY, clusterSettings),
        new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY)));
    AllocationService service = new AllocationService(Settings.builder().build(), allocationDeciders,
        new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
    ClusterState state = createInitialClusterState(service, Settings.builder().put("index.routing.allocation.initial_recovery._id", "node2").build());
    RoutingTable routingTable = state.routingTable();
    // we can initially only allocate on node2
    assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), INITIALIZING);
    assertEquals(routingTable.index("idx").shard(0).shards().get(0).currentNodeId(), "node2");
    routingTable = service.applyFailedShard(state, routingTable.index("idx").shard(0).shards().get(0)).routingTable();
    state = ClusterState.builder(state).routingTable(routingTable).build();
    assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), UNASSIGNED);
    assertNull(routingTable.index("idx").shard(0).shards().get(0).currentNodeId());
    // after failing the shard we are unassigned since the node is blacklisted and we can't initialize on the other node
    RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, 0, false);
    allocation.debugDecision(true);
    Decision.Single decision = (Decision.Single) filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).primaryShard(), state.getRoutingNodes().node("node2"), allocation);
    assertEquals(Type.YES, decision.type());
    assertEquals("node passes include/exclude/require filters", decision.getExplanation());
    ShardRouting primaryShard = routingTable.index("idx").shard(0).primaryShard();
    decision = (Decision.Single) filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).primaryShard(), state.getRoutingNodes().node("node1"), allocation);
    assertEquals(Type.NO, decision.type());
    if (primaryShard.recoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) {
        assertEquals("initial allocation of the shrunken index is only allowed on nodes [_id:\"node2\"] that " + "hold a copy of every shard in the index", decision.getExplanation());
    } else {
        assertEquals("initial allocation of the index is only allowed on nodes [_id:\"node2\"]", decision.getExplanation());
    }
    state = service.reroute(state, "try allocate again");
    routingTable = state.routingTable();
    assertEquals(routingTable.index("idx").shard(0).primaryShard().state(), INITIALIZING);
    assertEquals(routingTable.index("idx").shard(0).primaryShard().currentNodeId(), "node2");
    state = service.applyStartedShards(state, routingTable.index("idx").shard(0).shardsWithState(INITIALIZING));
    routingTable = state.routingTable();
    // ok now we are started and can be allocated anywhere!! let's see...
    // first create another copy
    assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).state(), INITIALIZING);
    assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).currentNodeId(), "node1");
    state = service.applyStartedShards(state, routingTable.index("idx").shard(0).replicaShardsWithState(INITIALIZING));
    routingTable = state.routingTable();
    assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).state(), STARTED);
    assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).currentNodeId(), "node1");
    // now remove the node of the other copy and fail the current
    DiscoveryNode node1 = state.nodes().resolveNode("node1");
    state = service.deassociateDeadNodes(ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).remove("node1")).build(), true, "test");
    state = service.applyFailedShard(state, routingTable.index("idx").shard(0).primaryShard());
    // now bring back node1 and see it's assigned
    state = service.reroute(ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).add(node1)).build(), "test");
    routingTable = state.routingTable();
    assertEquals(routingTable.index("idx").shard(0).primaryShard().state(), INITIALIZING);
    assertEquals(routingTable.index("idx").shard(0).primaryShard().currentNodeId(), "node1");
    allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, 0, false);
    allocation.debugDecision(true);
    decision = (Decision.Single) filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).shards().get(0), state.getRoutingNodes().node("node2"), allocation);
    assertEquals(Type.YES, decision.type());
    assertEquals("node passes include/exclude/require filters", decision.getExplanation());
    decision = (Decision.Single) filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).shards().get(0), state.getRoutingNodes().node("node1"), allocation);
    assertEquals(Type.YES, decision.type());
    assertEquals("node passes include/exclude/require filters", decision.getExplanation());
}
Also used: TestGatewayAllocator (org.elasticsearch.test.gateway.TestGatewayAllocator), ClusterState (org.elasticsearch.cluster.ClusterState), DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode), ClusterSettings (org.elasticsearch.common.settings.ClusterSettings), BalancedShardsAllocator (org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator), AllocationDeciders (org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders), ReplicaAfterPrimaryActiveAllocationDecider (org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider), Decision (org.elasticsearch.cluster.routing.allocation.decider.Decision), SameShardAllocationDecider (org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider), RoutingTable (org.elasticsearch.cluster.routing.RoutingTable), FilterAllocationDecider (org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider), ShardRouting (org.elasticsearch.cluster.routing.ShardRouting)
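
FilterAllocationDecider also evaluates the ordinary include/exclude/require filters mentioned in the YES explanations above, in addition to the initial_recovery filter under test. A minimal sketch of those index-level settings with illustrative values (only the setting-key family comes from the decider's explanation text):

Settings filterSettings = Settings.builder()
    // only allow allocation on the node with id "node2"
    .put("index.routing.allocation.require._id", "node2")
    // never allocate on the node with id "node1"
    .put("index.routing.allocation.exclude._id", "node1")
    .build();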

Aggregations

DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode): 352 usages
ClusterState (org.elasticsearch.cluster.ClusterState): 83 usages
ArrayList (java.util.ArrayList): 82 usages
Settings (org.elasticsearch.common.settings.Settings): 79 usages
DiscoveryNodes (org.elasticsearch.cluster.node.DiscoveryNodes): 74 usages
IOException (java.io.IOException): 69 usages
ShardRouting (org.elasticsearch.cluster.routing.ShardRouting): 52 usages
HashMap (java.util.HashMap): 45 usages
ShardId (org.elasticsearch.index.shard.ShardId): 45 usages
HashSet (java.util.HashSet): 43 usages
List (java.util.List): 41 usages
TransportAddress (org.elasticsearch.common.transport.TransportAddress): 41 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 39 usages
MockTransportService (org.elasticsearch.test.transport.MockTransportService): 39 usages
IndexMetaData (org.elasticsearch.cluster.metadata.IndexMetaData): 37 usages
Map (java.util.Map): 35 usages
ExecutionException (java.util.concurrent.ExecutionException): 35 usages
Version (org.elasticsearch.Version): 35 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 31 usages
ClusterName (org.elasticsearch.cluster.ClusterName): 30 usages