use of com.yahoo.config.provision.ClusterSpec in project vespa by vespa-engine.
the class DynamicDockerProvisioningTest method only_preferred_container_is_moved_from_hosts_with_headroom_violations.
/**
* Test that we only relocate the smallest nodes from a host to free up headroom.
* <p>
* The reason we want to do this is that it is a cheap approximation of the optimal solution, as we
* pick headroom to be on the hosts where we are closest to fulfilling the headroom requirement.
*
* Both applications could be moved here to free up headroom - but we want app2 (which is the smallest) to be moved.
* <p>
* | H  | H  | H2a | H2b |         | H  | H  | H  | H  |
* | H  | H  | H1a | H1b |   -->   | H  | H  | H  | H  |
* |    |    | 1a  | 1b  |         | 2a | 2b | 1a | 1b |
* |    |    |     |     |         |    |    | 1a | 1b |
*/
@Test
public void only_preferred_container_is_moved_from_hosts_with_headroom_violations() {
ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.perf, RegionName.from("us-east")), flavorsConfig(true));
tester.makeReadyNodes(4, "host-medium", NodeType.host, 32);
deployZoneApp(tester);
List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
Flavor flavorD2 = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-2");
Flavor flavorD1 = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");
// Application 1
ApplicationId application1 = makeApplicationId("t1", "1");
ClusterSpec clusterSpec1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
String hostParent2 = dockerHosts.get(2).hostname();
String hostParent3 = dockerHosts.get(3).hostname();
addAndAssignNode(application1, "1a", hostParent2, flavorD2, 0, tester);
addAndAssignNode(application1, "1b", hostParent3, flavorD2, 1, tester);
// Application 2
ApplicationId application2 = makeApplicationId("t2", "2");
ClusterSpec clusterSpec2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
addAndAssignNode(application2, "2a", hostParent2, flavorD1, 0, tester);
addAndAssignNode(application2, "2b", hostParent3, flavorD1, 1, tester);
// Assert allocation placement - prior to re-deployment
assertApplicationHosts(tester.nodeRepository().getNodes(application1), hostParent2, hostParent3);
assertApplicationHosts(tester.nodeRepository().getNodes(application2), hostParent2, hostParent3);
// Redeploy application 1
deployapp(application1, clusterSpec1, flavorD2, tester, 2);
// Re-assert allocation placement
assertApplicationHosts(tester.nodeRepository().getNodes(application1), hostParent2, hostParent3);
assertApplicationHosts(tester.nodeRepository().getNodes(application2), hostParent2, hostParent3);
// Redeploy application 2
deployapp(application2, clusterSpec2, flavorD1, tester, 2);
// Now app2 should have relocated
assertApplicationHosts(tester.nodeRepository().getNodes(application1), hostParent2, hostParent3);
assertApplicationHosts(tester.nodeRepository().getNodes(application2), dockerHosts.get(0).hostname(), dockerHosts.get(1).hostname());
}
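The assertApplicationHosts helper used above is defined elsewhere in the test class. A minimal sketch of what it is assumed to verify, namely that every allocated node sits on one of the expected parent hosts (the exact signature is an assumption inferred from the call sites):
private void assertApplicationHosts(List<Node> nodes, String... parentHostnames) {
    Set<String> expectedParents = new HashSet<>(Arrays.asList(parentHostnames));
    for (Node node : nodes) {
        // Every container node must be placed on one of the expected Docker hosts
        assertTrue(expectedParents.contains(node.parentHostname().get()));
    }
}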
use of com.yahoo.config.provision.ClusterSpec in project vespa by vespa-engine.
the class DynamicDockerProvisioningTest method relocate_nodes_from_spare_hosts.
/**
* Test relocation of nodes from spare hosts.
* <p>
* Set up 4 Docker hosts and allocate one container on each (from two different applications).
* No headroom defined - only getSpareCapacityProd() spares.
* <p>
* Check that it relocates containers away from the getSpareCapacityProd() spares
* <p>
* Initial allocation of app 1 and 2 --> final allocation (example using 2 spares):
* <p>
* |    |    |    |    |        |    |    |    |    |
* |    |    |    |    |  -->   | 2a | 2b |    |    |
* | 1a | 1b | 2a | 2b |        | 1a | 1b |    |    |
*/
@Test
public void relocate_nodes_from_spare_hosts() {
ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east")), flavorsConfig());
tester.makeReadyNodes(4, "host-small", NodeType.host, 32);
deployZoneApp(tester);
List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");
// Application 1
ApplicationId application1 = makeApplicationId("t1", "a1");
ClusterSpec clusterSpec1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
addAndAssignNode(application1, "1a", dockerHosts.get(0).hostname(), flavor, 0, tester);
addAndAssignNode(application1, "1b", dockerHosts.get(1).hostname(), flavor, 1, tester);
// Application 2
ApplicationId application2 = makeApplicationId("t2", "a2");
ClusterSpec clusterSpec2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
addAndAssignNode(application2, "2a", dockerHosts.get(2).hostname(), flavor, 0, tester);
addAndAssignNode(application2, "2b", dockerHosts.get(3).hostname(), flavor, 1, tester);
// Redeploy both applications (to be agnostic about which hosts are picked as spares)
deployapp(application1, clusterSpec1, flavor, tester, 2);
deployapp(application2, clusterSpec2, flavor, tester, 2);
// Assert that we have two spare nodes (two hosts that don't have any allocations)
Set<String> hostsWithChildren = new HashSet<>();
for (Node node : tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active)) {
if (!isInactiveOrRetired(node)) {
hostsWithChildren.add(node.parentHostname().get());
}
}
Assert.assertEquals(4 - tester.provisioner().getSpareCapacityProd(), hostsWithChildren.size());
}
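The deployapp and isInactiveOrRetired helpers referenced above are not shown here. Plausible sketches, assuming deployapp simply prepares nodeCount nodes of the given flavor in a single group and activates them (the prepare overload and Flavor.canonicalName() are assumptions based on the calls in the other snippets):
private void deployapp(ApplicationId id, ClusterSpec spec, Flavor flavor, ProvisioningTester tester, int nodeCount) {
    // Prepare the requested number of nodes in one group and activate them
    List<HostSpec> hosts = tester.prepare(id, spec, nodeCount, 1, flavor.canonicalName());
    tester.activate(id, new HashSet<>(hosts));
}

private boolean isInactiveOrRetired(Node node) {
    // A node does not count towards a host's active children if it is inactive or marked retired
    if (node.state() == Node.State.inactive) return true;
    return node.allocation().isPresent() && node.allocation().get().membership().retired();
}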
use of com.yahoo.config.provision.ClusterSpec in project vespa by vespa-engine.
the class ProvisioningTest method prepare.
private SystemState prepare(ApplicationId application, int container0Size, int container1Size, int content0Size, int content1Size, boolean required, String flavor, Version wantedVersion, ProvisioningTester tester) {
// "deploy prepare" with a two container clusters and a storage cluster having of two groups
ClusterSpec containerCluster0 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container0"), wantedVersion, false);
ClusterSpec containerCluster1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container1"), wantedVersion, false);
ClusterSpec contentCluster0 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content0"), wantedVersion, false);
ClusterSpec contentCluster1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content1"), wantedVersion, false);
Set<HostSpec> container0 = prepare(application, containerCluster0, container0Size, 1, required, flavor, tester);
Set<HostSpec> container1 = prepare(application, containerCluster1, container1Size, 1, required, flavor, tester);
Set<HostSpec> content0 = prepare(application, contentCluster0, content0Size, 1, required, flavor, tester);
Set<HostSpec> content1 = prepare(application, contentCluster1, content1Size, 1, required, flavor, tester);
Set<HostSpec> allHosts = new HashSet<>();
allHosts.addAll(container0);
allHosts.addAll(container1);
allHosts.addAll(content0);
allHosts.addAll(content1);
Function<Integer, Capacity> capacity = count -> Capacity.fromNodeCount(count, Optional.empty(), required);
int expectedContainer0Size = tester.capacityPolicies().decideSize(capacity.apply(container0Size));
int expectedContainer1Size = tester.capacityPolicies().decideSize(capacity.apply(container1Size));
int expectedContent0Size = tester.capacityPolicies().decideSize(capacity.apply(content0Size));
int expectedContent1Size = tester.capacityPolicies().decideSize(capacity.apply(content1Size));
assertEquals("Hosts in each group cluster is disjunct and the total number of unretired nodes is correct", expectedContainer0Size + expectedContainer1Size + expectedContent0Size + expectedContent1Size, tester.nonRetired(allHosts).size());
// Check cluster/group sizes
assertEquals(expectedContainer0Size, tester.nonRetired(container0).size());
assertEquals(expectedContainer1Size, tester.nonRetired(container1).size());
assertEquals(expectedContent0Size, tester.nonRetired(content0).size());
assertEquals(expectedContent1Size, tester.nonRetired(content1).size());
// Check cluster membership
tester.assertMembersOf(containerCluster0, container0);
tester.assertMembersOf(containerCluster1, container1);
tester.assertMembersOf(contentCluster0, content0);
tester.assertMembersOf(contentCluster1, content1);
return new SystemState(allHosts, container0, container1, content0, content1);
}
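tester.nonRetired(...) is assumed to filter out hosts whose cluster membership is flagged as retired; a rough sketch (that HostSpec.membership() is present for all prepared hosts is an assumption):
Set<HostSpec> nonRetired(Set<HostSpec> hosts) {
    // Keep only hosts whose cluster membership is not marked retired
    return hosts.stream()
                .filter(host -> !host.membership().get().retired())
                .collect(Collectors.toSet());
}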
use of com.yahoo.config.provision.ClusterSpec in project vespa by vespa-engine.
the class ProvisioningTest method assertCorrectFlavorPreferences.
private void assertCorrectFlavorPreferences(boolean largeIsStock) {
FlavorConfigBuilder b = new FlavorConfigBuilder();
b.addFlavor("large", 4., 8., 100, Flavor.Type.BARE_METAL).cost(10).stock(largeIsStock);
FlavorsConfig.Flavor.Builder largeVariant = b.addFlavor("large-variant", 3., 9., 101, Flavor.Type.BARE_METAL).cost(9);
b.addReplaces("large", largeVariant);
FlavorsConfig.Flavor.Builder largeVariantVariant = b.addFlavor("large-variant-variant", 4., 9., 101, Flavor.Type.BARE_METAL).cost(11);
b.addReplaces("large-variant", largeVariantVariant);
ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east")), b.build());
// cost = 10
tester.makeReadyNodes(6, "large");
// cost = 9
tester.makeReadyNodes(6, "large-variant");
// cost = 11
tester.makeReadyNodes(6, "large-variant-variant");
ApplicationId applicationId = tester.makeApplicationId();
ClusterSpec contentClusterSpec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.42"), false);
ClusterSpec containerClusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContainer"), Version.fromString("6.42"), false);
List<HostSpec> containerNodes = tester.prepare(applicationId, containerClusterSpec, 5, 1, "large");
List<HostSpec> contentNodes = tester.prepare(applicationId, contentClusterSpec, 10, 1, "large");
if (largeIsStock) {
// 'large' is replaced by 'large-variant' when possible, as it is cheaper
tester.assertNumberOfNodesWithFlavor(containerNodes, "large-variant", 5);
tester.assertNumberOfNodesWithFlavor(contentNodes, "large-variant", 1);
tester.assertNumberOfNodesWithFlavor(contentNodes, "large", 6);
} else {
// 'large' is preferred when available, as it is exactly what was specified
tester.assertNumberOfNodesWithFlavor(containerNodes, "large", 5);
tester.assertNumberOfNodesWithFlavor(contentNodes, "large", 1);
tester.assertNumberOfNodesWithFlavor(contentNodes, "large-variant", 6);
}
// In both cases the most expensive flavor, which is never exactly specified, is the least preferred
tester.assertNumberOfNodesWithFlavor(contentNodes, "large-variant-variant", 3);
}
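The helper is parameterized on whether the exactly specified 'large' flavor is stock, so it is typically driven by one test per case. The test method names below are illustrative, not necessarily the ones used in the project:
@Test
public void application_deployment_prefers_cheaper_stock_flavor() {
    assertCorrectFlavorPreferences(true);
}

@Test
public void application_deployment_prefers_exactly_specified_nonstock_flavor() {
    assertCorrectFlavorPreferences(false);
}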
use of com.yahoo.config.provision.ClusterSpec in project vespa by vespa-engine.
the class NodeTypeProvisioningTest method retire_multiple_proxy_simultaneously.
@Test
public void retire_multiple_proxy_simultaneously() {
MockDeployer deployer = new MockDeployer(tester.provisioner(), Collections.singletonMap(application, new MockDeployer.ApplicationContext(application, clusterSpec, capacity, 1)));
RetiredExpirer retiredExpirer = new RetiredExpirer(tester.nodeRepository(), tester.orchestrator(), deployer, tester.clock(), Duration.ofDays(30), Duration.ofMinutes(10), new JobControl(tester.nodeRepository().database()));
final int numNodesToRetire = 5;
{
// Deploy
List<HostSpec> hosts = deployProxies(application, tester);
assertEquals("Reserved all proxies", 11, hosts.size());
tester.activate(application, new HashSet<>(hosts));
List<Node> nodes = tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active);
assertEquals("Activated all proxies", 11, nodes.size());
}
List<Node> nodesToRetire = tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).subList(3, 3 + numNodesToRetire);
String currentlyRetiringHostname;
{
nodesToRetire.forEach(nodeToRetire -> tester.nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToRetire(true))));
List<HostSpec> hosts = deployProxies(application, tester);
assertEquals(11, hosts.size());
tester.activate(application, new HashSet<>(hosts));
List<Node> nodes = tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active);
assertEquals(11, nodes.size());
// Verify that wantToRetire has been propagated
List<Node> nodesCurrentlyRetiring = nodes.stream().filter(node -> node.allocation().get().membership().retired()).collect(Collectors.toList());
assertEquals(1, nodesCurrentlyRetiring.size());
// The retiring node should be one of the nodes we marked for retirement
currentlyRetiringHostname = nodesCurrentlyRetiring.get(0).hostname();
assertTrue(nodesToRetire.stream().map(Node::hostname).filter(hostname -> hostname.equals(currentlyRetiringHostname)).count() == 1);
}
{
// Redeploying while the node is still retiring has no effect
List<HostSpec> hosts = deployProxies(application, tester);
assertEquals(11, hosts.size());
tester.activate(application, new HashSet<>(hosts));
List<Node> nodes = tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active);
assertEquals(11, nodes.size());
// Verify that wantToRetire has been propagated
List<Node> nodesCurrentlyRetiring = nodes.stream().filter(node -> node.allocation().get().membership().retired()).collect(Collectors.toList());
assertEquals(1, nodesCurrentlyRetiring.size());
// The node that started retiring is still the only one retiring
assertEquals(currentlyRetiringHostname, nodesCurrentlyRetiring.get(0).hostname());
}
{
tester.advanceTime(Duration.ofMinutes(11));
retiredExpirer.run();
List<HostSpec> hosts = deployProxies(application, tester);
assertEquals(10, hosts.size());
tester.activate(application, new HashSet<>(hosts));
List<Node> nodes = tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active);
assertEquals(10, nodes.size());
// Verify the node we previously set to retire has finished retiring
assertEquals(Node.State.inactive, tester.nodeRepository().getNode(currentlyRetiringHostname).orElseThrow(RuntimeException::new).state());
// Verify that a node is currently retiring
List<Node> nodesCurrentlyRetiring = nodes.stream().filter(node -> node.allocation().get().membership().retired()).collect(Collectors.toList());
assertEquals(1, nodesCurrentlyRetiring.size());
// This node is different from the one that was retiring previously
String newRetiringHostname = nodesCurrentlyRetiring.get(0).hostname();
assertNotEquals(currentlyRetiringHostname, newRetiringHostname);
// ... but is one of the nodes that were put to wantToRetire earlier
assertTrue(nodesToRetire.stream().map(Node::hostname).filter(hostname -> hostname.equals(newRetiringHostname)).count() == 1);
}
for (int i = 0; i < 10; i++) {
tester.advanceTime(Duration.ofMinutes(11));
retiredExpirer.run();
List<HostSpec> hosts = deployProxies(application, tester);
tester.activate(application, new HashSet<>(hosts));
}
// After a long time, none of the currently active proxy nodes should be flagged wantToRetire or be retired
long numNonRetiredActiveProxyNodes = tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).stream().filter(node -> !node.status().wantToRetire()).filter(node -> !node.allocation().get().membership().retired()).count();
assertEquals(11 - numNodesToRetire, numNonRetiredActiveProxyNodes);
// All the nodes that were marked with wantToRetire earlier are now inactive
assertEquals(nodesToRetire.stream().map(Node::hostname).collect(Collectors.toSet()), tester.nodeRepository().getNodes(Node.State.inactive).stream().map(Node::hostname).collect(Collectors.toSet()));
}
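The deployProxies helper prepares the proxy application against the same clusterSpec and capacity fields that are handed to MockDeployer.ApplicationContext above. A minimal sketch, assuming a ProvisioningTester.prepare overload that takes a Capacity and a group count:
private List<HostSpec> deployProxies(ApplicationId application, ProvisioningTester tester) {
    // clusterSpec and capacity are the test-class fields also passed to the MockDeployer above
    return tester.prepare(application, clusterSpec, capacity, 1);
}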