Use of org.elasticsearch.cluster.node.DiscoveryNodes in project crate by crate.
From the class PublicationTests, the method testClusterStatePublishingWithFaultyNodeAfterCommit:
public void testClusterStatePublishingWithFaultyNodeAfterCommit() throws InterruptedException {
    VotingConfiguration singleNodeConfig = new VotingConfiguration(Set.of(n1.getId()));
    initializeCluster(singleNodeConfig);

    AssertingAckListener ackListener = new AssertingAckListener(nodes.size());
    DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(n1).add(n2).add(n3).localNodeId(n1.getId()).build();

    boolean publicationDidNotMakeItToNode2 = randomBoolean();
    AtomicInteger remainingActions = new AtomicInteger(publicationDidNotMakeItToNode2 ? 2 : 3);
    int injectFaultAt = randomInt(remainingActions.get() - 1);
    logger.info("Injecting fault at: {}, publicationDidNotMakeItToNode2: {}", injectFaultAt, publicationDidNotMakeItToNode2);

    MockPublication publication = node1.publish(
        CoordinationStateTests.clusterState(1L, 2L, discoveryNodes, singleNodeConfig, singleNodeConfig, 42L),
        ackListener,
        Collections.emptySet());

    publication.pendingPublications.entrySet().stream().collect(shuffle()).forEach(e -> {
        if (e.getKey().equals(n2) == false || publicationDidNotMakeItToNode2 == false) {
            PublishResponse publishResponse = nodeResolver.apply(e.getKey()).coordinationState
                .handlePublishRequest(publication.publishRequest);
            e.getValue().onResponse(new PublishWithJoinResponse(publishResponse, Optional.empty()));
        }
    });
    publication.pendingCommits.entrySet().stream().collect(shuffle()).forEach(e -> {
        // inject the fault at a random point of the commit round
        if (remainingActions.decrementAndGet() == injectFaultAt) {
            publication.onFaultyNode(n2);
        }
        if (e.getKey().equals(n2) == false || randomBoolean()) {
            nodeResolver.apply(e.getKey()).coordinationState.handleCommit(publication.applyCommit);
            e.getValue().onResponse(TransportResponse.Empty.INSTANCE);
        }
    });
    // we need to complete publication by failing the node
    if (publicationDidNotMakeItToNode2 && remainingActions.get() > injectFaultAt) {
        publication.onFaultyNode(n2);
    }

    assertTrue(publication.completed);
    assertTrue(publication.committed);

    // has no influence
    publication.onFaultyNode(randomFrom(n1, n3));

    List<Tuple<DiscoveryNode, Throwable>> errors = ackListener.awaitErrors(0L, TimeUnit.SECONDS);
    assertThat(errors.size(), equalTo(1));
    assertThat(errors.get(0).v1(), equalTo(n2));
    assertThat(errors.get(0).v2().getMessage(), containsString("faulty node"));
}
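The test describes the cluster to the publication through a DiscoveryNodes instance assembled with the fluent builder, with n1 marked as the local node. Below is a minimal, self-contained sketch of that builder pattern and the basic lookups it supports. It is not crate code, and it assumes the six-argument DiscoveryNode constructor of the Elasticsearch 7.x line that crate forked; the signature differs in other versions.

import java.net.InetAddress;
import java.util.Map;
import java.util.Set;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.transport.TransportAddress;

final class DiscoveryNodesBuilderSketch {

    public static void main(String[] args) {
        DiscoveryNode n1 = node("n1", 9301);
        DiscoveryNode n2 = node("n2", 9302);
        DiscoveryNode n3 = node("n3", 9303);

        // Same shape as the test fixture: register all three nodes and mark n1 as local.
        DiscoveryNodes discoveryNodes = DiscoveryNodes.builder()
            .add(n1).add(n2).add(n3)
            .localNodeId(n1.getId())
            .build();

        System.out.println(discoveryNodes.getSize());      // 3
        System.out.println(discoveryNodes.getLocalNode()); // n1
        System.out.println(discoveryNodes.nodeExists(n2)); // true
    }

    // Assumed constructor shape: DiscoveryNode(name, id, address, attributes, roles, version).
    private static DiscoveryNode node(String id, int port) {
        TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), port);
        return new DiscoveryNode(id, id, address, Map.of(), Set.of(), Version.CURRENT);
    }
}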
Use of org.elasticsearch.cluster.node.DiscoveryNodes in project crate by crate.
From the class PublicationTests, the method testClusterStatePublishingTimesOutAfterCommit:
public void testClusterStatePublishingTimesOutAfterCommit() throws InterruptedException {
    VotingConfiguration config = new VotingConfiguration(
        randomBoolean() ? Set.of(n1.getId(), n2.getId()) : Set.of(n1.getId(), n2.getId(), n3.getId()));
    initializeCluster(config);

    AssertingAckListener ackListener = new AssertingAckListener(nodes.size());
    DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(n1).add(n2).add(n3).localNodeId(n1.getId()).build();
    MockPublication publication = node1.publish(
        CoordinationStateTests.clusterState(1L, 2L, discoveryNodes, config, config, 42L),
        ackListener,
        Collections.emptySet());

    boolean publishedToN3 = randomBoolean();
    publication.pendingPublications.entrySet().stream().collect(shuffle()).forEach(e -> {
        if (e.getKey().equals(n3) == false || publishedToN3) {
            PublishResponse publishResponse = nodeResolver.apply(e.getKey()).coordinationState
                .handlePublishRequest(publication.publishRequest);
            e.getValue().onResponse(new PublishWithJoinResponse(publishResponse, Optional.empty()));
        }
    });

    assertNotNull(publication.applyCommit);

    Set<DiscoveryNode> committingNodes = new HashSet<>(randomSubsetOf(discoNodes));
    if (publishedToN3 == false) {
        committingNodes.remove(n3);
    }

    logger.info("Committing nodes: {}", committingNodes);

    publication.pendingCommits.entrySet().stream().collect(shuffle()).forEach(e -> {
        if (committingNodes.contains(e.getKey())) {
            nodeResolver.apply(e.getKey()).coordinationState.handleCommit(publication.applyCommit);
            e.getValue().onResponse(TransportResponse.Empty.INSTANCE);
        }
    });

    publication.cancel("timed out");
    assertTrue(publication.completed);
    assertTrue(publication.committed);
    assertEquals(committingNodes, ackListener.await(0L, TimeUnit.SECONDS));

    // check that acking still works after publication completed
    if (publishedToN3 == false) {
        publication.pendingPublications.get(n3).onResponse(
            new PublishWithJoinResponse(node3.coordinationState.handlePublishRequest(publication.publishRequest), Optional.empty()));
    }

    assertEquals(discoNodes, publication.pendingCommits.keySet());

    Set<DiscoveryNode> nonCommittedNodes = Sets.difference(discoNodes, committingNodes);
    logger.info("Non-committed nodes: {}", nonCommittedNodes);
    nonCommittedNodes.stream().collect(shuffle()).forEach(n ->
        publication.pendingCommits.get(n).onResponse(TransportResponse.Empty.INSTANCE));

    assertEquals(discoNodes, ackListener.await(0L, TimeUnit.SECONDS));
}
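The publication in this test can commit even though some nodes never answer, because committing only requires publish responses from a majority of the voting configuration. A minimal sketch of that majority rule follows; it is not crate code and assumes VotingConfiguration is the class nested in org.elasticsearch.cluster.coordination.CoordinationMetadata (spelled CoordinationMetaData in older versions).

import java.util.Set;

import org.elasticsearch.cluster.coordination.CoordinationMetadata.VotingConfiguration;

final class VotingQuorumSketch {

    public static void main(String[] args) {
        // Three voting nodes, as in the larger of the two randomized configurations above.
        VotingConfiguration config = new VotingConfiguration(Set.of("n1", "n2", "n3"));

        // Two of three voters form a quorum; a single voter does not.
        System.out.println(config.hasQuorum(Set.of("n1", "n2"))); // true
        System.out.println(config.hasQuorum(Set.of("n1")));       // false
    }
}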
Use of org.elasticsearch.cluster.node.DiscoveryNodes in project crate by crate.
From the class IndicesClusterStateService, the method createOrUpdateShards:
private void createOrUpdateShards(final ClusterState state) {
    RoutingNode localRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId());
    if (localRoutingNode == null) {
        return;
    }

    DiscoveryNodes nodes = state.nodes();
    RoutingTable routingTable = state.routingTable();

    for (final ShardRouting shardRouting : localRoutingNode) {
        ShardId shardId = shardRouting.shardId();
        if (failedShardsCache.containsKey(shardId) == false) {
            AllocatedIndex<? extends Shard> indexService = indicesService.indexService(shardId.getIndex());
            assert indexService != null : "index " + shardId.getIndex() + " should have been created by createIndices";
            Shard shard = indexService.getShardOrNull(shardId.id());
            if (shard == null) {
                assert shardRouting.initializing() : shardRouting + " should have been removed by failMissingShards";
                createShard(nodes, routingTable, shardRouting, state);
            } else {
                updateShard(nodes, shardRouting, shard, routingTable, state);
            }
        }
    }
}
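createShard and updateShard take the DiscoveryNodes so they can turn node ids from the routing table back into concrete nodes, for instance to find the node that currently holds the primary when a replica needs a peer recovery. The helper below is a simplified illustration of that lookup, not the crate implementation; the class and method names are invented for the sketch.

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;

final class RecoverySourceSketch {

    /**
     * Resolves the node holding the active primary of the given replica shard,
     * or null if the primary is not active yet.
     */
    static DiscoveryNode findPrimaryNode(DiscoveryNodes nodes, RoutingTable routingTable, ShardRouting replica) {
        ShardRouting primary = routingTable.shardRoutingTable(replica.shardId()).primaryShard();
        if (primary == null || primary.active() == false) {
            return null; // nothing to recover from yet
        }
        return nodes.get(primary.currentNodeId());
    }
}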
Use of org.elasticsearch.cluster.node.DiscoveryNodes in project crate by crate.
From the class InternalTestCluster, the method validateClusterFormed:
/**
 * Ensure a cluster is formed with all published nodes.
 */
public synchronized void validateClusterFormed() {
    final Set<DiscoveryNode> expectedNodes = new HashSet<>();
    for (NodeAndClient nodeAndClient : nodes.values()) {
        expectedNodes.add(getInstanceFromNode(ClusterService.class, nodeAndClient.node()).localNode());
    }
    logger.trace("validating cluster formed, expecting {}", expectedNodes);

    try {
        assertBusy(() -> {
            final List<ClusterState> states = nodes.values().stream()
                .map(node -> getInstanceFromNode(ClusterService.class, node.node()))
                .map(ClusterService::state)
                .collect(Collectors.toList());
            final String debugString = ", expected nodes: " + expectedNodes + " and actual cluster states " + states;
            // all nodes have a master
            assertTrue("Missing master" + debugString, states.stream().allMatch(cs -> cs.nodes().getMasterNodeId() != null));
            // all nodes have the same master (in same term)
            assertEquals("Not all masters in same term" + debugString, 1,
                states.stream().mapToLong(ClusterState::term).distinct().count());
            // all nodes know about all other nodes
            states.forEach(cs -> {
                DiscoveryNodes discoveryNodes = cs.nodes();
                assertEquals("Node size mismatch" + debugString, expectedNodes.size(), discoveryNodes.getSize());
                for (DiscoveryNode expectedNode : expectedNodes) {
                    assertTrue("Expected node to exist: " + expectedNode + debugString, discoveryNodes.nodeExists(expectedNode));
                }
            });
        }, 30, TimeUnit.SECONDS);
    } catch (AssertionError ae) {
        throw new IllegalStateException("cluster failed to form", ae);
    } catch (Exception e) {
        throw new IllegalStateException(e);
    }
}
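The per-state assertions rely on only three DiscoveryNodes accessors: getMasterNodeId() for the elected master, getSize() for the node count, and nodeExists() for membership. The standalone helper below condenses the same check; the class and method names are illustrative and not part of InternalTestCluster.

import java.util.Set;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;

final class ClusterFormedCheckSketch {

    /** True if the state has an elected master and contains exactly the expected nodes. */
    static boolean knowsAllNodes(ClusterState state, Set<DiscoveryNode> expectedNodes) {
        DiscoveryNodes discoveryNodes = state.nodes();
        if (discoveryNodes.getMasterNodeId() == null || discoveryNodes.getSize() != expectedNodes.size()) {
            return false;
        }
        return expectedNodes.stream().allMatch(discoveryNodes::nodeExists);
    }
}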
Use of org.elasticsearch.cluster.node.DiscoveryNodes in project crate by crate.
From the class InternalTestCluster, the method nodesInclude:
/**
 * Returns a set of nodes that have at least one shard of the given index.
 */
public synchronized Set<String> nodesInclude(String index) {
    if (clusterService().state().routingTable().hasIndex(index)) {
        List<ShardRouting> allShards = clusterService().state().routingTable().allShards(index);
        DiscoveryNodes discoveryNodes = clusterService().state().getNodes();
        Set<String> nodes = new HashSet<>();
        for (ShardRouting shardRouting : allShards) {
            if (shardRouting.assignedToNode()) {
                DiscoveryNode discoveryNode = discoveryNodes.get(shardRouting.currentNodeId());
                nodes.add(discoveryNode.getName());
            }
        }
        return nodes;
    }
    return Collections.emptySet();
}
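The same lookup can also be written as a stream over the index's shard routings, mapping each assigned shard to the name of its current node via DiscoveryNodes.get(). The sketch below is an equivalent formulation, not the crate method, and the class and method names are invented here.

import java.util.Set;
import java.util.stream.Collectors;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.ShardRouting;

final class NodesIncludeSketch {

    static Set<String> nodesWithShardsOf(ClusterState state, String index) {
        if (state.routingTable().hasIndex(index) == false) {
            return Set.of();
        }
        DiscoveryNodes discoveryNodes = state.getNodes();
        return state.routingTable().allShards(index).stream()
            .filter(ShardRouting::assignedToNode)
            .map(shard -> discoveryNodes.get(shard.currentNodeId()).getName())
            .collect(Collectors.toSet());
    }
}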