Use of com.yahoo.vespa.hosted.provision.Node in project vespa by vespa-engine.
In class AllocationVisualizer, method paintComponent:
    @Override
    public void paintComponent(Graphics g) {
        super.paintComponent(g);
        System.out.println("PAINTING");
        if (steps.isEmpty()) return;

        int nodeX = 40;
        int nodeY = BOX_HEIGHT - 20; // start at the bottom

        // Draw the background box
        g.setColor(Color.WHITE);
        g.fillRect(0, 0, BOX_WIDTH, BOX_HEIGHT);

        // Draw the docker hosts, coloring each container according to its application
        AllocationSnapshot simStep = steps.get(step);
        NodeList hosts = simStep.nodes.nodeType(NodeType.host);
        for (Node host : hosts.asList()) {
            // Paint the host
            paintNode(host, g, nodeX, nodeY, true);

            // Paint its containers
            NodeList containers = simStep.nodes.childrenOf(host);
            for (Node container : containers.asList()) {
                nodeY = paintNode(container, g, nodeX, nodeY, false);
            }

            // Next host
            nodeX += nodeWidth + nodeSpacing;
            nodeY = BOX_HEIGHT - 20;
        }

        // Display messages
        g.setColor(Color.BLACK);
        g.setFont(new Font("Courier New", Font.BOLD, 15));
        g.drawString(simStep.task, 20, 30);
        g.drawString(simStep.message, 20, 50);
    }
Use of com.yahoo.vespa.hosted.provision.Node in project vespa by vespa-engine.
In class NodeAllocation, method offer:
    /**
     * Offer some nodes to this. The nodes may have an allocation to a different application or cluster,
     * an allocation to this cluster, or no current allocation (in which case one is assigned).
     *
     * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
     * reject allocated nodes due to index duplicates.
     *
     * @param nodesPrioritized the nodes which are potentially on offer. These may belong to a different application etc.
     * @return the subset of the offered nodes which was accepted, with the correct allocation assigned
     */
    List<Node> offer(List<PrioritizableNode> nodesPrioritized) {
        List<Node> accepted = new ArrayList<>();
        for (PrioritizableNode offeredPriority : nodesPrioritized) {
            Node offered = offeredPriority.node;
            if (offered.allocation().isPresent()) {
                boolean wantToRetireNode = false;
                ClusterMembership membership = offered.allocation().get().membership();
                if (!offered.allocation().get().owner().equals(application)) continue; // wrong application
                if (!membership.cluster().equalsIgnoringGroupAndVespaVersion(cluster)) continue; // wrong cluster id/type
                if ((!offeredPriority.isSurplusNode || saturated()) && !membership.cluster().group().equals(cluster.group())) continue; // wrong group, and we can't or have no reason to change it
                if (offered.allocation().get().isRemovable()) continue; // don't accept; causes removal
                if (indexes.contains(membership.index())) continue; // duplicate index (just to be sure)

                // Conditions under which we want to retire nodes that were allocated previously
                if (offeredNodeHasParentHostnameAlreadyAccepted(this.nodes, offered)) wantToRetireNode = true;
                if (!hasCompatibleFlavor(offered)) wantToRetireNode = true;
                if (offered.flavor().isRetired()) wantToRetireNode = true;
                if (offered.status().wantToRetire()) wantToRetireNode = true;
                if (requestedNodes.isExclusive() && !hostsOnly(application.tenant(), offered.parentHostname())) wantToRetireNode = true;

                if ((!saturated() && hasCompatibleFlavor(offered)) || acceptToRetire(offered)) {
                    accepted.add(acceptNode(offeredPriority, wantToRetireNode));
                }
            } else if (!saturated() && hasCompatibleFlavor(offered)) {
                if (offeredNodeHasParentHostnameAlreadyAccepted(this.nodes, offered)) {
                    ++rejectedWithClashingParentHost;
                    continue;
                }
                if (!exclusiveTo(application.tenant(), offered.parentHostname())) {
                    ++rejectedDueToExclusivity;
                    continue;
                }
                if (requestedNodes.isExclusive() && !hostsOnly(application.tenant(), offered.parentHostname())) {
                    ++rejectedDueToExclusivity;
                    continue;
                }
                if (offered.flavor().isRetired()) continue;
                if (offered.status().wantToRetire()) continue;

                offeredPriority.node = offered.allocate(application,
                                                        ClusterMembership.from(cluster, highestIndex.add(1)),
                                                        nodeRepository.clock().instant());
                accepted.add(acceptNode(offeredPriority, false));
            }
        }
        return accepted;
    }
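The parent-host clash check used twice above is not shown in this snippet. A plausible sketch, assuming this.nodes holds the nodes accepted so far and that the check rejects a second node placed on a parent host that already carries an accepted node for this cluster (the real implementation may differ):

    // Sketch only: true if some already-accepted node shares the offered node's parent host.
    private boolean offeredNodeHasParentHostnameAlreadyAccepted(Collection<PrioritizableNode> accepted, Node offered) {
        if (!offered.parentHostname().isPresent()) return false; // nodes without a parent cannot clash
        return accepted.stream()
                       .anyMatch(n -> n.node.parentHostname().isPresent()
                                   && n.node.parentHostname().get().equals(offered.parentHostname().get()));
    }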
Use of com.yahoo.vespa.hosted.provision.Node in project vespa by vespa-engine.
In class NodeRetirerTester, method createReadyNodesByFlavor:
    void createReadyNodesByFlavor(int... nums) {
        List<Node> nodes = new ArrayList<>();
        for (int i = 0; i < nums.length; i++) {
            Flavor flavor = flavors.get(i);
            for (int j = 0; j < nums[i]; j++) {
                int id = nextNodeId++;
                nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com",
                                                    Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant));
            }
        }
        nodes = nodeRepository.addNodes(nodes);
        nodes = nodeRepository.setDirty(nodes, Agent.system, getClass().getSimpleName());
        nodeRepository.setReady(nodes, Agent.system, getClass().getSimpleName());
    }
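Each vararg position maps to the flavor at the same index in the tester's flavor list, so a call like the following (illustrative) creates five ready nodes of flavors.get(0), none of flavors.get(1), and two of flavors.get(2):

    tester.createReadyNodesByFlavor(5, 0, 2);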
Use of com.yahoo.vespa.hosted.provision.Node in project vespa by vespa-engine.
In class RetiredExpirerTest, method ensure_retired_groups_time_out:
    @Test
    public void ensure_retired_groups_time_out() {
        createReadyNodes(8, nodeRepository, nodeFlavors);
        createHostNodes(4, nodeRepository, nodeFlavors);
        ApplicationId applicationId = ApplicationId.from(TenantName.from("foo"), ApplicationName.from("bar"), InstanceName.from("fuz"));
        ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false);
        activate(applicationId, cluster, 8, 8, provisioner);
        activate(applicationId, cluster, 2, 2, provisioner);
        assertEquals(8, nodeRepository.getNodes(applicationId, Node.State.active).size());
        assertEquals(0, nodeRepository.getNodes(applicationId, Node.State.inactive).size());

        // Cause inactivation of retired nodes: advance the clock past the retire period
        clock.advance(Duration.ofHours(30));
        MockDeployer deployer = new MockDeployer(provisioner,
                Collections.singletonMap(applicationId,
                        new MockDeployer.ApplicationContext(applicationId, cluster,
                                Capacity.fromNodeCount(2, Optional.of("default"), false), 1)));
        createRetiredExpirer(deployer).run();
        assertEquals(2, nodeRepository.getNodes(applicationId, Node.State.active).size());
        assertEquals(6, nodeRepository.getNodes(applicationId, Node.State.inactive).size());
        assertEquals(1, deployer.redeployments);

        // Inactivated nodes should no longer be marked retired
        for (Node node : nodeRepository.getNodes(applicationId, Node.State.inactive))
            assertFalse(node.allocation().get().membership().retired());
    }
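The activate helper is defined elsewhere in the test. A sketch of what it plausibly does, assuming the standard prepare/activate provisioning cycle; the exact signature and the null ProvisionLogger argument are assumptions:

    // Sketch only: prepares the requested node count for the cluster and activates it in one transaction.
    private void activate(ApplicationId applicationId, ClusterSpec cluster, int nodes, int groups,
                          NodeRepositoryProvisioner provisioner) {
        List<HostSpec> hosts = provisioner.prepare(applicationId, cluster, Capacity.fromNodeCount(nodes), groups, null);
        NestedTransaction transaction = new NestedTransaction();
        provisioner.activate(transaction, applicationId, hosts);
        transaction.commit();
    }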
Use of com.yahoo.vespa.hosted.provision.Node in project vespa by vespa-engine.
In class MetricsReporterTest, method docker_metrics:
    @Test
    public void docker_metrics() throws Exception {
        NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("host", "docker", "docker2");
        Curator curator = new MockCurator();
        NodeRepository nodeRepository = new NodeRepository(nodeFlavors, curator, Clock.systemUTC(), Zone.defaultZone(),
                                                           new MockNameResolver().mockAnyLookup(),
                                                           new DockerImage("docker-registry.domain.tld:8080/dist/vespa"), true);

        // Allow 4 containers
        Set<String> additionalIps = new HashSet<>();
        additionalIps.add("::2");
        additionalIps.add("::3");
        additionalIps.add("::4");
        additionalIps.add("::5");

        Node dockerHost = Node.create("openStackId1", Collections.singleton("::1"), additionalIps, "dockerHost",
                                      Optional.empty(), nodeFlavors.getFlavorOrThrow("host"), NodeType.host);
        nodeRepository.addNodes(Collections.singletonList(dockerHost));
        nodeRepository.setDirty("dockerHost", Agent.system, getClass().getSimpleName());
        nodeRepository.setReady("dockerHost", Agent.system, getClass().getSimpleName());

        Node container1 = Node.createDockerNode("openStackId1:1", Collections.singleton("::2"), Collections.emptySet(), "container1",
                                                Optional.of("dockerHost"), nodeFlavors.getFlavorOrThrow("docker"), NodeType.tenant);
        container1 = container1.with(allocation(Optional.of("app1")).get());
        nodeRepository.addDockerNodes(Collections.singletonList(container1));

        Node container2 = Node.createDockerNode("openStackId1:2", Collections.singleton("::3"), Collections.emptySet(), "container2",
                                                Optional.of("dockerHost"), nodeFlavors.getFlavorOrThrow("docker2"), NodeType.tenant);
        container2 = container2.with(allocation(Optional.of("app2")).get());
        nodeRepository.addDockerNodes(Collections.singletonList(container2));

        Orchestrator orchestrator = mock(Orchestrator.class);
        ServiceMonitor serviceMonitor = mock(ServiceMonitor.class);
        when(orchestrator.getNodeStatus(any())).thenReturn(HostStatus.NO_REMARKS);
        ServiceModel serviceModel = mock(ServiceModel.class);
        when(serviceMonitor.getServiceModelSnapshot()).thenReturn(serviceModel);
        when(serviceModel.getServiceInstancesByHostName()).thenReturn(Collections.emptyMap());

        TestMetric metric = new TestMetric();
        MetricsReporter metricsReporter = new MetricsReporter(nodeRepository, metric, orchestrator, serviceMonitor,
                                                              Duration.ofMinutes(1), new JobControl(nodeRepository.database()));
        metricsReporter.maintain();

        assertEquals(0L, metric.values.get("hostedVespa.readyHosts"));
        assertEquals(2L, metric.values.get("hostedVespa.reservedHosts")); // only tenant nodes count
        assertEquals(12.0, metric.values.get("hostedVespa.docker.totalCapacityDisk"));
        assertEquals(10.0, metric.values.get("hostedVespa.docker.totalCapacityMem"));
        assertEquals(7.0, metric.values.get("hostedVespa.docker.totalCapacityCpu"));
        assertEquals(6.0, metric.values.get("hostedVespa.docker.freeCapacityDisk"));
        assertEquals(3.0, metric.values.get("hostedVespa.docker.freeCapacityMem"));
        assertEquals(4.0, metric.values.get("hostedVespa.docker.freeCapacityCpu"));
        assertContext(metric, "hostedVespa.docker.freeCapacityFlavor", 1, 0);
        assertContext(metric, "hostedVespa.docker.idealHeadroomFlavor", 0, 0);
        assertContext(metric, "hostedVespa.docker.hostsAvailableFlavor", 1L, 0L);
    }
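The assertContext helper is not shown in this snippet, and the TestMetric API it inspects is not visible here. A sketch of its likely intent, using a hypothetical valueByDimension accessor (an assumption, not the real TestMetric method): the flavor-dimensioned variants of the metric should report dockerValue for the "docker" flavor and docker2Value for "docker2".

    // Sketch only: valueByDimension is an assumed per-dimension lookup on TestMetric.
    private void assertContext(TestMetric metric, String key, Number dockerValue, Number docker2Value) {
        assertEquals(dockerValue, metric.valueByDimension(key, "flavor", "docker"));
        assertEquals(docker2Value, metric.valueByDimension(key, "flavor", "docker2"));
    }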