Example usage of com.yahoo.config.provision.Zone in project vespa (vespa-engine): class DynamicDockerProvisioningTest, method allocation_should_fail_when_host_is_not_active.
/**
 * Preparing an application must throw OutOfCapacityException when the only
 * candidate hosts exist merely in state 'provisioned' (never activated).
 */
@Test(expected = OutOfCapacityException.class)
public void allocation_should_fail_when_host_is_not_active() {
    ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east")), flavorsConfig());
    // Hosts are provisioned but deliberately not brought to 'active'
    tester.makeProvisionedNodes(3, "host-small", NodeType.host, 32);
    deployZoneApp(tester);

    ApplicationId app = tester.makeApplicationId();
    Flavor requestedFlavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3");
    ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    tester.prepare(app, cluster, 2, 1, requestedFlavor.canonicalName());
}
Example usage of com.yahoo.config.provision.Zone in project vespa (vespa-engine): class DynamicDockerProvisioningTest, method reloacte_failed_nodes.
/**
 * Test an allocation workflow:
 * <p>
 * 5 Hosts of capacity 3 (2 spares)
 * - Allocate app with 3 nodes
 * - Allocate app with 2 nodes
 * - Fail host and check redistribution
 */
@Test
public void reloacte_failed_nodes() { // NOTE(review): name has a typo ("relocate"); kept to preserve the test's identity in reports
    ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east")), flavorsConfig());
    tester.makeReadyNodes(5, "host-small", NodeType.host, 32);
    deployZoneApp(tester);
    List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
    Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");

    // Application 1
    ApplicationId application1 = makeApplicationId("t1", "a1");
    ClusterSpec clusterSpec1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    deployapp(application1, clusterSpec1, flavor, tester, 3);

    // Application 2
    ApplicationId application2 = makeApplicationId("t2", "a2");
    ClusterSpec clusterSpec2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    deployapp(application2, clusterSpec2, flavor, tester, 2);

    // Application 3
    ApplicationId application3 = makeApplicationId("t3", "a3");
    ClusterSpec clusterSpec3 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    deployapp(application3, clusterSpec3, flavor, tester, 2);

    // App 2 and 3 should have been allocated to the same nodes - fail one of the parent hosts from there
    String parent = tester.nodeRepository().getNodes(application2).stream().findAny().get().parentHostname().get();
    tester.nodeRepository().failRecursively(parent, Agent.system, "Testing");

    // Redeploy all applications
    deployapp(application1, clusterSpec1, flavor, tester, 3);
    deployapp(application2, clusterSpec2, flavor, tester, 2);
    deployapp(application3, clusterSpec3, flavor, tester, 2);

    // Histogram: number of hosts (value) per child count (key)
    Map<Integer, Integer> numberOfChildrenStat = new HashMap<>();
    for (Node host : dockerHosts) {
        int numChildren = tester.nodeRepository().getChildNodes(host.hostname()).size();
        numberOfChildrenStat.merge(numChildren, 1, Integer::sum); // replaces the containsKey/put dance
    }
    // Uppercase 'L' suffix: a lowercase 'l' is easily misread as the digit '1'
    assertEquals(3L, (long) numberOfChildrenStat.get(3));
    assertEquals(1L, (long) numberOfChildrenStat.get(0));
    assertEquals(1L, (long) numberOfChildrenStat.get(1));
}
Example usage of com.yahoo.config.provision.Zone in project vespa (vespa-engine): class DynamicDockerProvisioningTest, method only_preferred_container_is_moved_from_hosts_with_headroom_violations.
/**
 * Test that we only relocate the smallest nodes from a host to free up headroom.
 * <p>
 * The reason we want to do this is that it is a cheap approximation for the optimal solution, as we
 * pick headroom to be on the hosts where we are closest to fulfilling the headroom requirement.
 *
 * Both applications could be moved here to free up headroom - but we want app2 (which is smallest) to be moved.
 * <p>
 * | H  | H  | H2a | H2b |      | H  | H  | H  | H  |
 * | H  | H  | H1a | H1b | -->  | H  | H  | H  | H  |
 * |    |    | 1a  | 1b  |      | 2a | 2b | 1a | 1b |
 * |    |    |     |     |      |    |    | 1a | 1b |
 */
@Test
public void only_preferred_container_is_moved_from_hosts_with_headroom_violations() {
    ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.perf, RegionName.from("us-east")), flavorsConfig(true));
    tester.makeReadyNodes(4, "host-medium", NodeType.host, 32);
    deployZoneApp(tester);
    List<Node> hosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
    Flavor mediumFlavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-2");
    Flavor smallFlavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");
    String host2 = hosts.get(2).hostname();
    String host3 = hosts.get(3).hostname();

    // Application 1: two d-2 containers, pinned on hosts 2 and 3
    ApplicationId application1 = makeApplicationId("t1", "1");
    ClusterSpec clusterSpec1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    addAndAssignNode(application1, "1a", host2, mediumFlavor, 0, tester);
    addAndAssignNode(application1, "1b", host3, mediumFlavor, 1, tester);

    // Application 2: two d-1 containers on the same two hosts
    ApplicationId application2 = makeApplicationId("t2", "2");
    ClusterSpec clusterSpec2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    addAndAssignNode(application2, "2a", host2, smallFlavor, 0, tester);
    addAndAssignNode(application2, "2b", host3, smallFlavor, 1, tester);

    // Placement before any redeployment: both apps share hosts 2 and 3
    assertApplicationHosts(tester.nodeRepository().getNodes(application1), host2, host3);
    assertApplicationHosts(tester.nodeRepository().getNodes(application2), host2, host3);

    // Redeploying application 1 (the larger one) must NOT move it
    deployapp(application1, clusterSpec1, mediumFlavor, tester, 2);
    assertApplicationHosts(tester.nodeRepository().getNodes(application1), host2, host3);
    assertApplicationHosts(tester.nodeRepository().getNodes(application2), host2, host3);

    // Redeploying application 2 (the smaller one) relocates it to hosts 0 and 1
    deployapp(application2, clusterSpec2, smallFlavor, tester, 2);
    assertApplicationHosts(tester.nodeRepository().getNodes(application1), host2, host3);
    assertApplicationHosts(tester.nodeRepository().getNodes(application2), hosts.get(0).hostname(), hosts.get(1).hostname());
}
Example usage of com.yahoo.config.provision.Zone in project vespa (vespa-engine): class DynamicDockerProvisioningTest, method relocate_nodes_from_spare_hosts.
/**
 * Test relocation of nodes from spare hosts.
 * <p>
 * Setup 4 docker hosts and allocate one container on each (from two different applications)
 * No headroom defined - only getSpareCapacityProd() spares.
 * <p>
 * Check that it relocates containers away from the getSpareCapacityProd() spares
 * <p>
 * Initial allocation of app 1 and 2 --> final allocation (example using 2 spares):
 * <p>
 * |    |    |    |    |        |    |    |    |    |
 * |    |    |    |    |  -->   | 2a | 2b |    |    |
 * | 1a | 1b | 2a | 2b |        | 1a | 1b |    |    |
 */
@Test
public void relocate_nodes_from_spare_hosts() {
    ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east")), flavorsConfig());
    tester.makeReadyNodes(4, "host-small", NodeType.host, 32);
    deployZoneApp(tester);
    List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
    Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");

    // Application 1
    ApplicationId application1 = makeApplicationId("t1", "a1");
    ClusterSpec clusterSpec1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    addAndAssignNode(application1, "1a", dockerHosts.get(0).hostname(), flavor, 0, tester);
    addAndAssignNode(application1, "1b", dockerHosts.get(1).hostname(), flavor, 1, tester);

    // Application 2
    ApplicationId application2 = makeApplicationId("t2", "a2");
    ClusterSpec clusterSpec2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    addAndAssignNode(application2, "2a", dockerHosts.get(2).hostname(), flavor, 0, tester);
    addAndAssignNode(application2, "2b", dockerHosts.get(3).hostname(), flavor, 1, tester);

    // Redeploy both applications (to be agnostic on which hosts are picked as spares)
    deployapp(application1, clusterSpec1, flavor, tester, 2);
    deployapp(application2, clusterSpec2, flavor, tester, 2);

    // Assert that we have two spare hosts (hosts that don't have active, non-retired allocations)
    Set<String> hostsWithChildren = new HashSet<>();
    for (Node node : tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active)) {
        if (!isInactiveOrRetired(node)) {
            hostsWithChildren.add(node.parentHostname().get());
        }
    }
    // Statically imported assertEquals, consistent with the other tests in this class
    assertEquals(4 - tester.provisioner().getSpareCapacityProd(), hostsWithChildren.size());
}
Example usage of com.yahoo.config.provision.Zone in project vespa (vespa-engine): class ProvisioningTest, method out_of_capacity_all_nodes_want_to_retire.
/**
 * When every ready node is flagged wantToRetire, prepare must fail with
 * OutOfCapacityException rather than allocate retiring nodes.
 */
@Test
public void out_of_capacity_all_nodes_want_to_retire() {
    ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east")));
    ApplicationId app = tester.makeApplicationId();
    // Flag all nodes for retirement
    for (Node node : tester.makeReadyNodes(5, "default")) {
        tester.patchNode(node.with(node.status().withWantToRetire(true)));
    }
    try {
        prepare(app, 2, 0, 2, 0, "default", tester);
        fail("Expected exception");
    } catch (OutOfCapacityException e) {
        assertTrue(e.getMessage().startsWith("Could not satisfy request"));
    }
}
Aggregations