use of com.yahoo.vespa.hosted.provision.Node in project vespa by vespa-engine.
the class DynamicDockerProvisioningTest method relocate_failed_nodes.
/**
* Test an allocation workflow:
* <p>
* 5 Hosts of capacity 3 (2 spares)
* - Allocate one application with 3 nodes and two applications with 2 nodes each
* - Fail a host and check redistribution
*/
@Test
public void relocate_failed_nodes() {
    ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east")), flavorsConfig());
    tester.makeReadyNodes(5, "host-small", NodeType.host, 32);
    deployZoneApp(tester);
    List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
    Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");

    // Application 1
    ApplicationId application1 = makeApplicationId("t1", "a1");
    ClusterSpec clusterSpec1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    deployapp(application1, clusterSpec1, flavor, tester, 3);

    // Application 2
    ApplicationId application2 = makeApplicationId("t2", "a2");
    ClusterSpec clusterSpec2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    deployapp(application2, clusterSpec2, flavor, tester, 2);

    // Application 3
    ApplicationId application3 = makeApplicationId("t3", "a3");
    ClusterSpec clusterSpec3 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    deployapp(application3, clusterSpec3, flavor, tester, 2);

    // App 2 and 3 should have been allocated to the same hosts - fail one of the parent hosts
    String parent = tester.nodeRepository().getNodes(application2).stream().findAny().get().parentHostname().get();
    tester.nodeRepository().failRecursively(parent, Agent.system, "Testing");

    // Redeploy all applications
    deployapp(application1, clusterSpec1, flavor, tester, 3);
    deployapp(application2, clusterSpec2, flavor, tester, 2);
    deployapp(application3, clusterSpec3, flavor, tester, 2);

    // Count how many hosts have 0, 1, ... children after the redistribution
    Map<Integer, Integer> numberOfChildrenStat = new HashMap<>();
    for (Node node : dockerHosts) {
        int nofChildren = tester.nodeRepository().getChildNodes(node.hostname()).size();
        numberOfChildrenStat.merge(nofChildren, 1, Integer::sum);
    }
    assertEquals(3L, (long) numberOfChildrenStat.get(3));
    assertEquals(1L, (long) numberOfChildrenStat.get(0));
    assertEquals(1L, (long) numberOfChildrenStat.get(1));
}
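The tally loop above maps child count to number of hosts. The same distribution can be computed with the JDK streams API; a minimal, equivalent sketch, assuming the same tester and dockerHosts variables as in the test:

// Equivalent tally: group hosts by their child count, then count hosts per group
Map<Integer, Long> childrenStat = dockerHosts.stream()
        .collect(Collectors.groupingBy(
                host -> tester.nodeRepository().getChildNodes(host.hostname()).size(),
                Collectors.counting()));
// Three hosts with 3 children, one with 1, one with 0 - same assertions as above
assertEquals(3L, (long) childrenStat.get(3));

This uses only java.util.stream.Collectors, dropping the manual merge bookkeeping of the loop.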
use of com.yahoo.vespa.hosted.provision.Node in project vespa by vespa-engine.
the class DynamicDockerProvisioningTest method addAndAssignNode.
private Node addAndAssignNode(ApplicationId id, String hostname, String parentHostname, Flavor flavor, int index, ProvisioningTester tester) {
    // Create a tenant node as a child of the given parent host
    Node node1a = Node.create("open1", Collections.singleton("127.0.0.100"), new HashSet<>(), hostname, Optional.of(parentHostname), flavor, NodeType.tenant);
    ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false).with(Optional.of(ClusterSpec.Group.from(0)));
    ClusterMembership clusterMembership1 = ClusterMembership.from(clusterSpec, index);
    // Allocate the node to the application, then add and activate it in the node repository
    Node node1aAllocation = node1a.allocate(id, clusterMembership1, Instant.now());
    tester.nodeRepository().addNodes(Collections.singletonList(node1aAllocation));
    NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(tester.getCurator()));
    tester.nodeRepository().activate(Collections.singletonList(node1aAllocation), transaction);
    transaction.commit();
    return node1aAllocation;
}
use of com.yahoo.vespa.hosted.provision.Node in project vespa by vespa-engine.
the class DynamicDockerProvisioningTest method only_preferred_container_is_moved_from_hosts_with_headroom_violations.
/**
* Test that we only relocate the smallest nodes from a host to free up headroom.
* <p>
* The reason we want to do this is that it is a cheap approximation of the optimal solution: we
* pick headroom on the hosts where we are closest to fulfilling the headroom requirement.
*
* Both applications could be moved here to free up headroom - but we want app2 (the smaller one) to be moved.
* <p>
* | H  | H  | H2a | H2b |     | H  | H  | H  | H  |
* | H  | H  | H1a | H1b | --> | H  | H  | H  | H  |
* |    |    | 1a  | 1b  |     | 2a | 2b | 1a | 1b |
* |    |    |     |     |     |    |    | 1a | 1b |
*/
@Test
public void only_preferred_container_is_moved_from_hosts_with_headroom_violations() {
    ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.perf, RegionName.from("us-east")), flavorsConfig(true));
    tester.makeReadyNodes(4, "host-medium", NodeType.host, 32);
    deployZoneApp(tester);
    List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
    Flavor flavorD2 = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-2");
    Flavor flavorD1 = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");

    // Application 1
    ApplicationId application1 = makeApplicationId("t1", "1");
    ClusterSpec clusterSpec1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    String hostParent2 = dockerHosts.get(2).hostname();
    String hostParent3 = dockerHosts.get(3).hostname();
    addAndAssignNode(application1, "1a", hostParent2, flavorD2, 0, tester);
    addAndAssignNode(application1, "1b", hostParent3, flavorD2, 1, tester);

    // Application 2
    ApplicationId application2 = makeApplicationId("t2", "2");
    ClusterSpec clusterSpec2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    addAndAssignNode(application2, "2a", hostParent2, flavorD1, 0, tester);
    addAndAssignNode(application2, "2b", hostParent3, flavorD1, 1, tester);

    // Assert allocation placement - prior to redeployment
    assertApplicationHosts(tester.nodeRepository().getNodes(application1), hostParent2, hostParent3);
    assertApplicationHosts(tester.nodeRepository().getNodes(application2), hostParent2, hostParent3);

    // Redeploy application 1
    deployapp(application1, clusterSpec1, flavorD2, tester, 2);

    // Re-assert allocation placement - nothing should have moved yet
    assertApplicationHosts(tester.nodeRepository().getNodes(application1), hostParent2, hostParent3);
    assertApplicationHosts(tester.nodeRepository().getNodes(application2), hostParent2, hostParent3);

    // Redeploy application 2
    deployapp(application2, clusterSpec2, flavorD1, tester, 2);

    // Now app2 (the smaller one) should have relocated to the two empty hosts
    assertApplicationHosts(tester.nodeRepository().getNodes(application1), hostParent2, hostParent3);
    assertApplicationHosts(tester.nodeRepository().getNodes(application2), dockerHosts.get(0).hostname(), dockerHosts.get(1).hostname());
}
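assertApplicationHosts is called throughout these tests but is not shown on this page. A minimal sketch of what such a helper might look like, assuming it only verifies that every node of the application sits on one of the expected parent hosts (the real helper may differ):

// Hypothetical sketch of the unshown helper, not the actual test-class code
private void assertApplicationHosts(List<Node> nodes, String... hostnames) {
    Set<String> expectedParents = new HashSet<>(Arrays.asList(hostnames));
    for (Node node : nodes) {
        // Each tenant node must be placed on one of the expected docker hosts
        Assert.assertTrue(expectedParents.contains(node.parentHostname().get()));
    }
}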
use of com.yahoo.vespa.hosted.provision.Node in project vespa by vespa-engine.
the class DynamicDockerProvisioningTest method findSpareCapacity.
private List<Node> findSpareCapacity(ProvisioningTester tester) {
    List<Node> nodes = tester.nodeRepository().getNodes(Node.State.values());
    NodeList nl = new NodeList(nodes);
    return nodes.stream()
            .filter(n -> n.type() == NodeType.host)
            .filter(n -> nl.childrenOf(n).size() == 0) // Hosts without children
            .collect(Collectors.toList());
}
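A natural use of this helper is to check that the number of empty hosts matches the configured spare capacity; a small usage sketch, assuming the same tester as in the tests above:

// Hosts without children are the spares in this setup
List<Node> spareHosts = findSpareCapacity(tester);
assertEquals(tester.provisioner().getSpareCapacityProd(), spareHosts.size());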
use of com.yahoo.vespa.hosted.provision.Node in project vespa by vespa-engine.
the class DynamicDockerProvisioningTest method relocate_nodes_from_spare_hosts.
/**
* Test relocation of nodes from spare hosts.
* <p>
* Set up 4 docker hosts and allocate one container on each (from two different applications).
* No headroom defined - only getSpareCapacityProd() spares.
* <p>
* Check that it relocates containers away from the getSpareCapacityProd() spares
* <p>
* Initial allocation of app 1 and 2 --> final allocation (example using 2 spares):
* <p>
* | | | | | | | | | |
* | | | | | --> | 2a | 2b | | |
* | 1a | 1b | 2a | 2b | | 1a | 1b | | |
*/
@Test
public void relocate_nodes_from_spare_hosts() {
    ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east")), flavorsConfig());
    tester.makeReadyNodes(4, "host-small", NodeType.host, 32);
    deployZoneApp(tester);
    List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
    Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");

    // Application 1
    ApplicationId application1 = makeApplicationId("t1", "a1");
    ClusterSpec clusterSpec1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    addAndAssignNode(application1, "1a", dockerHosts.get(0).hostname(), flavor, 0, tester);
    addAndAssignNode(application1, "1b", dockerHosts.get(1).hostname(), flavor, 1, tester);

    // Application 2
    ApplicationId application2 = makeApplicationId("t2", "a2");
    ClusterSpec clusterSpec2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    addAndAssignNode(application2, "2a", dockerHosts.get(2).hostname(), flavor, 0, tester);
    addAndAssignNode(application2, "2b", dockerHosts.get(3).hostname(), flavor, 1, tester);

    // Redeploy both applications (to be agnostic about which hosts are picked as spares)
    deployapp(application1, clusterSpec1, flavor, tester, 2);
    deployapp(application2, clusterSpec2, flavor, tester, 2);

    // Assert that we have two spare hosts (hosts without any active, non-retired children)
    Set<String> hostsWithChildren = new HashSet<>();
    for (Node node : tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active)) {
        if (!isInactiveOrRetired(node)) {
            hostsWithChildren.add(node.parentHostname().get());
        }
    }
    Assert.assertEquals(4 - tester.provisioner().getSpareCapacityProd(), hostsWithChildren.size());
}
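isInactiveOrRetired is referenced above but not shown on this page. A hypothetical sketch of such a predicate, assuming a node is excluded when it is in the inactive state or when its cluster membership is marked retired (the real helper may differ):

// Hypothetical sketch of the unshown predicate, not the actual test-class code
private boolean isInactiveOrRetired(Node node) {
    if (node.state() == Node.State.inactive) return true;
    // An active node can still be retired through its allocation's cluster membership
    return node.allocation().isPresent() && node.allocation().get().membership().retired();
}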