Use of com.yahoo.config.provision.Flavor in project vespa by vespa-engine.
The class FlavorSpareCount, method constructFlavorSpareCountGraph.
public static Map<Flavor, FlavorSpareCount> constructFlavorSpareCountGraph(List<Flavor> flavors) {
    Map<Flavor, FlavorSpareCount> spareCountByFlavor = new HashMap<>();
    Map<Flavor, Set<Flavor>> immediateReplaceeFlavorsByFlavor = new HashMap<>();
    for (Flavor flavor : flavors) {
        for (Flavor replaces : flavor.replaces()) {
            if (!immediateReplaceeFlavorsByFlavor.containsKey(replaces)) {
                immediateReplaceeFlavorsByFlavor.put(replaces, new HashSet<>());
            }
            immediateReplaceeFlavorsByFlavor.get(replaces).add(flavor);
        }
        spareCountByFlavor.put(flavor, new FlavorSpareCount(flavor));
    }
    spareCountByFlavor.forEach((flavor, flavorSpareCount) -> {
        flavorSpareCount.immediateReplacees = !immediateReplaceeFlavorsByFlavor.containsKey(flavor) ?
                Collections.emptySet() :
                immediateReplaceeFlavorsByFlavor.get(flavor).stream().map(spareCountByFlavor::get).collect(Collectors.toSet());
        flavorSpareCount.possibleWantedFlavors = recursiveReplacements(flavor, new HashSet<>()).stream()
                .map(spareCountByFlavor::get)
                .collect(Collectors.toSet());
    });
    return spareCountByFlavor;
}
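A minimal usage sketch, assuming a flavors list obtained elsewhere (for example the configured flavors of the node repository); the list name and the printing below are illustrative assumptions, not part of the vespa code above.

// Hedged usage sketch: 'flavors' is assumed to be the configured flavor list.
// The graph is keyed directly by Flavor, and every input flavor gets an entry.
Map<Flavor, FlavorSpareCount> graph = FlavorSpareCount.constructFlavorSpareCountGraph(flavors);
for (Flavor flavor : flavors) {
    FlavorSpareCount spareCount = graph.get(flavor); // direct lookup, never null for an input flavor
    System.out.println(flavor.name() + " -> " + spareCount);
}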
Use of com.yahoo.config.provision.Flavor in project vespa by vespa-engine.
The class NodeRetirer, method retireUnallocated.
/**
 * Retires unallocated nodes by moving them directly to parked.
 * Returns true iff there are no unallocated nodes that match the retirement policy.
 */
boolean retireUnallocated() {
    try (Mutex lock = nodeRepository().lockUnallocated()) {
        List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
        Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
        flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
        long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
                .filter(node -> node.state() == Node.State.ready)
                .filter(node -> retirementPolicy.shouldRetire(node).isPresent())
                .collect(Collectors.groupingBy(Node::flavor, Collectors.toSet()))
                .entrySet().stream()
                .filter(entry -> {
                    Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
                    for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
                        Node nodeToRetire = iter.next();
                        if (!flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor()))
                            break;
                        retirementPolicy.shouldRetire(nodeToRetire).ifPresent(reason -> {
                            nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)));
                            nodeRepository().park(nodeToRetire.hostname(), Agent.NodeRetirer, reason);
                            iter.remove();
                        });
                    }
                    if (!nodesThatShouldBeRetiredForFlavor.isEmpty()) {
                        String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream()
                                .map(Node::hostname)
                                .collect(Collectors.joining(", "));
                        log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
                                entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
                    }
                    return !nodesThatShouldBeRetiredForFlavor.isEmpty();
                }).count();
        return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
    }
}
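The helper getNumberOfNodesByFlavorByNodeState is not shown on this page. Judging from its return type, it presumably just counts nodes per flavor and per state; a hedged sketch of that shape, using only the Node accessors already seen above and the same allNodes list, could be:

// Assumed sketch only, not the actual vespa helper: a nested grouping that counts
// nodes first by flavor and then by state within each flavor.
Map<Flavor, Map<Node.State, Long>> numNodesByFlavorByState = allNodes.stream()
        .collect(Collectors.groupingBy(Node::flavor,
                Collectors.groupingBy(Node::state, Collectors.counting())));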
Use of com.yahoo.config.provision.Flavor in project vespa by vespa-engine.
The class DynamicDockerProvisioningTest, method relocate_nodes_from_headroom_hosts.
/**
 * Test relocation of nodes that violate headroom.
 * <p>
 * Set up 4 docker hosts and allocate one container on each (from two different applications).
 * No spares - only headroom (4xd-2).
 * <p>
 * One application is now violating headroom and needs relocation.
 * <p>
 * Initial allocation of app 1 and 2 --> final allocation (headroom marked as H):
 * <p>
 * | H  | H  | H   | H   |        |    |    |    |    |
 * | H  | H  | H1a | H1b |  -->   |    |    |    |    |
 * |    |    | 2a  | 2b  |        | 1a | 1b | 2a | 2b |
 */
@Test
public void relocate_nodes_from_headroom_hosts() {
    ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.perf, RegionName.from("us-east")), flavorsConfig(true));
    tester.makeReadyNodes(4, "host-small", NodeType.host, 32);
    deployZoneApp(tester);
    List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
    Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");

    // Application 1
    ApplicationId application1 = makeApplicationId("t1", "a1");
    ClusterSpec clusterSpec1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    addAndAssignNode(application1, "1a", dockerHosts.get(2).hostname(), flavor, 0, tester);
    addAndAssignNode(application1, "1b", dockerHosts.get(3).hostname(), flavor, 1, tester);

    // Application 2
    ApplicationId application2 = makeApplicationId("t2", "a2");
    ClusterSpec clusterSpec2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    addAndAssignNode(application2, "2a", dockerHosts.get(2).hostname(), flavor, 0, tester);
    addAndAssignNode(application2, "2b", dockerHosts.get(3).hostname(), flavor, 1, tester);

    // Redeploy one of the applications
    deployapp(application1, clusterSpec1, flavor, tester, 2);

    // Assert that the nodes are spread across all hosts (to allow headroom)
    Set<String> hostsWithChildren = new HashSet<>();
    for (Node node : tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active)) {
        if (!isInactiveOrRetired(node)) {
            hostsWithChildren.add(node.parentHostname().get());
        }
    }
    Assert.assertEquals(4, hostsWithChildren.size());
}
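The isInactiveOrRetired test helper is not reproduced on this page. Assuming it excludes nodes that are inactive or whose allocation is flagged retired, a sketch could look like the following; the allocation/membership accessors are an assumption about the Node API, not confirmed by the snippet above.

// Hypothetical sketch of the helper used in the loop above.
private boolean isInactiveOrRetired(Node node) {
    if (node.state() == Node.State.inactive) return true;                       // skip inactive nodes
    return node.allocation().map(a -> a.membership().retired()).orElse(false);  // skip retired allocations
}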
Use of com.yahoo.config.provision.Flavor in project vespa by vespa-engine.
The class DynamicDockerProvisioningTest, method new_docker_nodes_are_marked_as_headroom_violations.
/**
 * Test that new docker nodes that would result in headroom violations are
 * correctly marked as such.
 * <p>
 * When redeploying app1, nothing should happen (moving app1 to hosts 0 and 1 would violate headroom).
 * Then redeploying app 2 should cause a relocation.
 * <p>
 * | H  | H  | H2a | H2b |        | H  | H  | H   | H   |
 * | H  | H  | H1a | H1b |  -->   | H  | H  | H1a | H1b |
 * |    |    | 1a  | 1b  |        | 2a | 2b | 1a  | 1b  |
 */
@Test
public void new_docker_nodes_are_marked_as_headroom_violations() {
    ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.perf, RegionName.from("us-east")), flavorsConfig(true));
    tester.makeReadyNodes(4, "host-small", NodeType.host, 32);
    deployZoneApp(tester);
    List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
    Flavor flavorD2 = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-2");
    Flavor flavorD1 = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");

    // Application 1
    ApplicationId application1 = makeApplicationId("t1", "1");
    ClusterSpec clusterSpec1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    String hostParent2 = dockerHosts.get(2).hostname();
    String hostParent3 = dockerHosts.get(3).hostname();
    addAndAssignNode(application1, "1a", hostParent2, flavorD2, 0, tester);
    addAndAssignNode(application1, "1b", hostParent3, flavorD2, 1, tester);

    // Application 2
    ApplicationId application2 = makeApplicationId("t2", "2");
    ClusterSpec clusterSpec2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    addAndAssignNode(application2, "2a", hostParent2, flavorD1, 0, tester);
    addAndAssignNode(application2, "2b", hostParent3, flavorD1, 1, tester);

    // Assert allocation placement - prior to re-deployment
    assertApplicationHosts(tester.nodeRepository().getNodes(application1), hostParent2, hostParent3);
    assertApplicationHosts(tester.nodeRepository().getNodes(application2), hostParent2, hostParent3);

    // Redeploy application 1
    deployapp(application1, clusterSpec1, flavorD2, tester, 2);

    // Re-assert allocation placement
    assertApplicationHosts(tester.nodeRepository().getNodes(application1), hostParent2, hostParent3);
    assertApplicationHosts(tester.nodeRepository().getNodes(application2), hostParent2, hostParent3);

    // Redeploy application 2
    deployapp(application2, clusterSpec2, flavorD1, tester, 2);

    // Now app2 should have re-located
    assertApplicationHosts(tester.nodeRepository().getNodes(application1), hostParent2, hostParent3);
    assertApplicationHosts(tester.nodeRepository().getNodes(application2), dockerHosts.get(0).hostname(), dockerHosts.get(1).hostname());
}
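assertApplicationHosts is a test helper that is not shown here. Its assumed contract is that every node of the application is parented on one of the given hosts; a sketch under that assumption:

// Hypothetical sketch of the assertion helper used above.
private void assertApplicationHosts(List<Node> nodes, String... hosts) {
    Set<String> allowedParents = new HashSet<>(Arrays.asList(hosts));
    for (Node node : nodes) {
        Assert.assertTrue(node.hostname() + " should be a child of one of " + allowedParents,
                allowedParents.contains(node.parentHostname().get()));
    }
}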
Use of com.yahoo.config.provision.Flavor in project vespa by vespa-engine.
The class DynamicDockerProvisioningTest, method spare_capacity_used_only_when_replacement.
@Test
public void spare_capacity_used_only_when_replacement() {
    // Use spare capacity only when replacing (i.e. when one node has failed).
    // The test should allocate as much capacity as possible and verify that one more unit cannot be allocated.
    // Verify that there is still capacity (available spare).
    // Fail one node and redeploy; verify that one less node is empty.
    ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east")), flavorsConfig());

    // Only run test if there _is_ spare capacity
    if (tester.provisioner().getSpareCapacityProd() == 0) {
        return;
    }

    // Setup test
    ApplicationId application1 = tester.makeApplicationId();
    tester.makeReadyNodes(5, "host-small", NodeType.host, 32);
    deployZoneApp(tester);
    Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3");

    // Deploy initial state (can max deploy 3 nodes due to redundancy requirements)
    List<HostSpec> hosts = tester.prepare(application1, ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false), 3, 1, flavor.canonicalName());
    tester.activate(application1, ImmutableSet.copyOf(hosts));

    DockerHostCapacity capacity = new DockerHostCapacity(tester.nodeRepository().getNodes(Node.State.values()));
    assertThat(capacity.freeCapacityInFlavorEquivalence(flavor), greaterThan(0));

    List<Node> initialSpareCapacity = findSpareCapacity(tester);
    assertThat(initialSpareCapacity.size(), is(2));

    try {
        hosts = tester.prepare(application1, ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false), 4, 1, flavor.canonicalName());
        fail("Was able to deploy with 4 nodes, should not be able to use spare capacity");
    } catch (OutOfCapacityException e) {
        // Expected: spare capacity must not be used for growth
    }

    tester.fail(hosts.get(0));
    hosts = tester.prepare(application1, ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false), 3, 1, flavor.canonicalName());
    tester.activate(application1, ImmutableSet.copyOf(hosts));

    List<Node> finalSpareCapacity = findSpareCapacity(tester);
    assertThat(finalSpareCapacity.size(), is(1));
}
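findSpareCapacity is another helper not shown on this page. A hedged guess at its shape, assuming a "spare" here means an active docker host with no children allocated to it:

// Assumed sketch only: hosts that currently have no child nodes are counted as spare.
private List<Node> findSpareCapacity(ProvisioningTester tester) {
    List<Node> nodes = tester.nodeRepository().getNodes(Node.State.values());
    Set<String> parentsInUse = nodes.stream()
            .filter(node -> node.parentHostname().isPresent())
            .map(node -> node.parentHostname().get())
            .collect(Collectors.toSet());
    return nodes.stream()
            .filter(node -> node.type() == NodeType.host)
            .filter(node -> !parentsInUse.contains(node.hostname()))
            .collect(Collectors.toList());
}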