use of com.yahoo.config.provision.NodeFlavors in project vespa by vespa-engine.
the class MetricsReporterTest method docker_metrics.
@Test
public void docker_metrics() throws Exception {
    NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("host", "docker", "docker2");
    Curator curator = new MockCurator();
    NodeRepository nodeRepository = new NodeRepository(nodeFlavors, curator, Clock.systemUTC(), Zone.defaultZone(),
                                                       new MockNameResolver().mockAnyLookup(),
                                                       new DockerImage("docker-registry.domain.tld:8080/dist/vespa"), true);
    // Allow 4 containers
    Set<String> additionalIps = new HashSet<>();
    additionalIps.add("::2");
    additionalIps.add("::3");
    additionalIps.add("::4");
    additionalIps.add("::5");
    Node dockerHost = Node.create("openStackId1", Collections.singleton("::1"), additionalIps, "dockerHost",
                                  Optional.empty(), nodeFlavors.getFlavorOrThrow("host"), NodeType.host);
    nodeRepository.addNodes(Collections.singletonList(dockerHost));
    nodeRepository.setDirty("dockerHost", Agent.system, getClass().getSimpleName());
    nodeRepository.setReady("dockerHost", Agent.system, getClass().getSimpleName());
    Node container1 = Node.createDockerNode("openStackId1:1", Collections.singleton("::2"), Collections.emptySet(), "container1",
                                            Optional.of("dockerHost"), nodeFlavors.getFlavorOrThrow("docker"), NodeType.tenant);
    container1 = container1.with(allocation(Optional.of("app1")).get());
    nodeRepository.addDockerNodes(Collections.singletonList(container1));
    Node container2 = Node.createDockerNode("openStackId1:2", Collections.singleton("::3"), Collections.emptySet(), "container2",
                                            Optional.of("dockerHost"), nodeFlavors.getFlavorOrThrow("docker2"), NodeType.tenant);
    container2 = container2.with(allocation(Optional.of("app2")).get());
    nodeRepository.addDockerNodes(Collections.singletonList(container2));
    Orchestrator orchestrator = mock(Orchestrator.class);
    ServiceMonitor serviceMonitor = mock(ServiceMonitor.class);
    when(orchestrator.getNodeStatus(any())).thenReturn(HostStatus.NO_REMARKS);
    ServiceModel serviceModel = mock(ServiceModel.class);
    when(serviceMonitor.getServiceModelSnapshot()).thenReturn(serviceModel);
    when(serviceModel.getServiceInstancesByHostName()).thenReturn(Collections.emptyMap());
    TestMetric metric = new TestMetric();
    MetricsReporter metricsReporter = new MetricsReporter(nodeRepository, metric, orchestrator, serviceMonitor,
                                                          Duration.ofMinutes(1), new JobControl(nodeRepository.database()));
    metricsReporter.maintain();
    assertEquals(0L, metric.values.get("hostedVespa.readyHosts"));
    // Only tenant nodes count
    assertEquals(2L, metric.values.get("hostedVespa.reservedHosts"));
    assertEquals(12.0, metric.values.get("hostedVespa.docker.totalCapacityDisk"));
    assertEquals(10.0, metric.values.get("hostedVespa.docker.totalCapacityMem"));
    assertEquals(7.0, metric.values.get("hostedVespa.docker.totalCapacityCpu"));
    assertEquals(6.0, metric.values.get("hostedVespa.docker.freeCapacityDisk"));
    assertEquals(3.0, metric.values.get("hostedVespa.docker.freeCapacityMem"));
    assertEquals(4.0, metric.values.get("hostedVespa.docker.freeCapacityCpu"));
    assertContext(metric, "hostedVespa.docker.freeCapacityFlavor", 1, 0);
    assertContext(metric, "hostedVespa.docker.idealHeadroomFlavor", 0, 0);
    assertContext(metric, "hostedVespa.docker.hostsAvailableFlavor", 1L, 0L);
}
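The two container nodes above are attached to applications through an allocation(Optional<String>) helper that is not part of this snippet. A minimal sketch of what such a helper could look like, assuming the Allocation, ClusterMembership and Generation types from the node repository and com.yahoo.config.provision; the application id, membership string, Vespa version and constructor arguments below are illustrative assumptions, not the values used by MetricsReporterTest:

// Hypothetical helper (a sketch, not the test's actual implementation):
// wraps a tenant name into an Allocation so a Node can be registered as allocated.
private Optional<Allocation> allocation(Optional<String> tenant) {
    if (!tenant.isPresent()) return Optional.empty();
    ApplicationId owner = ApplicationId.from(tenant.get(), "test", "default");          // assumed application id
    ClusterMembership membership = ClusterMembership.from("container/id1/0/3",          // assumed membership string
                                                          Version.fromString("6.42"));
    return Optional.of(new Allocation(owner, membership, new Generation(0, 0), false)); // assumed constructor arguments
}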
use of com.yahoo.config.provision.NodeFlavors in project vespa by vespa-engine.
the class NodeRetirerTest method setup.
@Before
public void setup() {
    doAnswer(invoke -> {
        boolean shouldRetire = ((Node) invoke.getArguments()[0]).ipAddresses().equals(Collections.singleton("::1"));
        return shouldRetire ? Optional.of("Some reason") : Optional.empty();
    }).when(policy).shouldRetire(any(Node.class));
    when(policy.isActive()).thenReturn(true);
    NodeFlavors nodeFlavors = NodeRetirerTester.makeFlavors(5);
    tester = new NodeRetirerTester(nodeFlavors);
    retirer = spy(tester.makeNodeRetirer(policy));
    tester.createReadyNodesByFlavor(21, 42, 27, 15, 8);
    tester.deployApp("vespa", "calendar", new int[] { 3 }, new int[] { 7 });
    tester.deployApp("vespa", "notes", new int[] { 0 }, new int[] { 3 });
    tester.deployApp("sports", "results", new int[] { 0 }, new int[] { 6 });
    tester.deployApp("search", "images", new int[] { 3 }, new int[] { 4 });
    tester.deployApp("search", "videos", new int[] { 2 }, new int[] { 2 });
    tester.deployApp("tester", "my-app", new int[] { 1, 2 }, new int[] { 4, 6 });
}
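With this setup, the stubbed policy retires exactly those nodes whose only IP address is ::1. A small illustrative check of that stub (a sketch, not one of the actual NodeRetirerTest cases; it assumes the tester exposes its repository as tester.nodeRepository and that getNodes() returns all nodes):

// Sketch: exercise the stubbed retirement policy against every node in the repository.
// tester.nodeRepository and getNodes() are assumptions about the tester/repository API.
for (Node node : tester.nodeRepository.getNodes()) {
    Optional<String> reason = policy.shouldRetire(node);
    assertEquals(node.ipAddresses().equals(Collections.singleton("::1")), reason.isPresent());
}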
use of com.yahoo.config.provision.NodeFlavors in project vespa by vespa-engine.
the class NodePrioritizer method findHeadroomHosts.
/**
 * Headroom hosts are the hosts with the least, but still sufficient, capacity for the requested headroom.
 *
 * If there is not enough headroom, the headroom-violating hosts are the ones that come closest to fulfilling
 * a headroom request.
 */
private static Map<Node, ResourceCapacity> findHeadroomHosts(List<Node> nodes, Set<Node> spareNodes, NodeFlavors flavors) {
    DockerHostCapacity capacity = new DockerHostCapacity(nodes);
    Map<Node, ResourceCapacity> headroomHosts = new HashMap<>();
    List<Node> hostsSortedOnLeastCapacity = nodes.stream()
            .filter(n -> !spareNodes.contains(n))
            .filter(node -> node.type().equals(NodeType.host))
            .filter(dockerHost -> dockerHost.state().equals(Node.State.active))
            .filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
            .sorted((a, b) -> capacity.compareWithoutInactive(b, a))
            .collect(Collectors.toList());
    // For all flavors with ideal headroom, find which hosts this headroom should be allocated to
    for (Flavor flavor : flavors.getFlavors().stream().filter(f -> f.getIdealHeadroom() > 0).collect(Collectors.toList())) {
        Set<Node> tempHeadroom = new HashSet<>();
        Set<Node> notEnoughCapacity = new HashSet<>();
        ResourceCapacity headroomCapacity = ResourceCapacity.of(flavor);
        // Select hosts that have available capacity for both headroom and new allocations
        for (Node host : hostsSortedOnLeastCapacity) {
            if (headroomHosts.containsKey(host))
                continue;
            if (capacity.hasCapacityWhenRetiredAndInactiveNodesAreGone(host, headroomCapacity)) {
                headroomHosts.put(host, headroomCapacity);
                tempHeadroom.add(host);
            } else {
                notEnoughCapacity.add(host);
            }
            if (tempHeadroom.size() == flavor.getIdealHeadroom()) {
                break;
            }
        }
        // Now check whether we found enough headroom; if not, choose the hosts that come closest
        if (tempHeadroom.size() < flavor.getIdealHeadroom()) {
            List<Node> violations = notEnoughCapacity.stream()
                    .sorted((a, b) -> capacity.compare(b, a))
                    .limit(flavor.getIdealHeadroom() - tempHeadroom.size())
                    .collect(Collectors.toList());
            for (Node hostViolatingHeadroom : violations) {
                headroomHosts.put(hostViolatingHeadroom, headroomCapacity);
            }
        }
    }
    return headroomHosts;
}
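The map returned here feeds into host prioritization. A minimal sketch of one way a caller might use it, under the assumption that hosts carrying reserved headroom should simply sort last among otherwise equal candidates (illustrative only, not the actual NodePrioritizer scoring):

// Sketch: order candidate hosts so that hosts with reserved headroom come last,
// making them the least attractive targets for new docker allocations.
private static List<Node> orderAvoidingHeadroom(List<Node> candidateHosts,
                                                Map<Node, ResourceCapacity> headroomHosts) {
    return candidateHosts.stream()
            .sorted(Comparator.comparing(headroomHosts::containsKey)) // false (no reserved headroom) sorts first
            .collect(Collectors.toList());
}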