
Example 1 with Allocation

Use of com.yahoo.vespa.hosted.provision.node.Allocation in project vespa by vespa-engine.

From the class NodeAllocation, method finalNodes:

/**
 * Make the number of <i>non-retired</i> nodes in the list equal to the requested number
 * of nodes, and retire the rest of the list. Only retire currently active nodes.
 * Prefer to retire nodes of the wrong flavor.
 * Make as few changes to the retired set as possible.
 *
 * @param surplusNodes nodes that are no longer needed by this group are added to this list
 * @return the final list of nodes
 */
List<Node> finalNodes(List<Node> surplusNodes) {
    int currentRetiredCount = (int) nodes.stream().filter(node -> node.node.allocation().get().membership().retired()).count();
    int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), currentRetiredCount) - currentRetiredCount;
    if (deltaRetiredCount > 0) {
        // retire until deltaRetiredCount is 0, prefer to retire higher indexes to minimize redistribution
        for (PrioritizableNode node : byDecreasingIndex(nodes)) {
            if (!node.node.allocation().get().membership().retired() && node.node.state().equals(Node.State.active)) {
                node.node = node.node.retire(Agent.application, nodeRepository.clock().instant());
                // offer this node to other groups
                surplusNodes.add(node.node);
                if (--deltaRetiredCount == 0)
                    break;
            }
        }
    } else if (deltaRetiredCount < 0) {
        // unretire until deltaRetiredCount is 0
        for (PrioritizableNode node : byIncreasingIndex(nodes)) {
            if (node.node.allocation().get().membership().retired() && hasCompatibleFlavor(node.node)) {
                node.node = node.node.unretire();
                if (++deltaRetiredCount == 0)
                    break;
            }
        }
    }
    for (PrioritizableNode node : nodes) {
        node.node = requestedNodes.assignRequestedFlavor(node.node);
        // Set whether the node is exclusive
        Allocation allocation = node.node.allocation().get();
        node.node = node.node.with(allocation.with(allocation.membership().with(allocation.membership().cluster().exclusive(requestedNodes.isExclusive()))));
    }
    return nodes.stream().map(n -> n.node).collect(Collectors.toList());
}
Also used : ApplicationId(com.yahoo.config.provision.ApplicationId) ClusterMembership(com.yahoo.config.provision.ClusterMembership) Collection(java.util.Collection) ClusterSpec(com.yahoo.config.provision.ClusterSpec) Set(java.util.Set) Node(com.yahoo.vespa.hosted.provision.Node) Collectors(java.util.stream.Collectors) TenantName(com.yahoo.config.provision.TenantName) Allocation(com.yahoo.vespa.hosted.provision.node.Allocation) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) NodeRepository(com.yahoo.vespa.hosted.provision.NodeRepository) MutableInteger(com.yahoo.lang.MutableInteger) List(java.util.List) Agent(com.yahoo.vespa.hosted.provision.node.Agent) Optional(java.util.Optional) Comparator(java.util.Comparator) LinkedHashSet(java.util.LinkedHashSet)
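
The example above adjusts the retired set toward an ideal count: it retires active nodes starting from the highest index when too few are retired, and unretires nodes starting from the lowest index when too many are. Below is a minimal, self-contained sketch of that delta logic. SimpleNode, adjustRetired, and the ideal-retired formula are hypothetical stand-ins for illustration, not the real Vespa types.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class RetireDeltaSketch {

    // Hypothetical node holding only the fields the retire logic needs.
    static final class SimpleNode {
        final int index;     // position within the cluster group
        boolean retired;
        boolean active;

        SimpleNode(int index, boolean retired, boolean active) {
            this.index = index;
            this.retired = retired;
            this.active = active;
        }
    }

    /** Retire surplus active nodes (highest index first) or unretire (lowest index first). */
    static void adjustRetired(List<SimpleNode> nodes, int wantedNonRetired) {
        int currentRetired = (int) nodes.stream().filter(n -> n.retired).count();
        // Assumed ideal: everything beyond the wanted non-retired count should be retired.
        int idealRetired = Math.max(0, nodes.size() - wantedNonRetired);
        int delta = idealRetired - currentRetired;

        if (delta > 0) { // too few retired: retire active nodes, highest index first
            List<SimpleNode> byDecreasingIndex = new ArrayList<>(nodes);
            byDecreasingIndex.sort(Comparator.comparingInt((SimpleNode n) -> n.index).reversed());
            for (SimpleNode n : byDecreasingIndex) {
                if (!n.retired && n.active) {
                    n.retired = true;
                    if (--delta == 0) break;
                }
            }
        } else if (delta < 0) { // too many retired: unretire, lowest index first
            List<SimpleNode> byIncreasingIndex = new ArrayList<>(nodes);
            byIncreasingIndex.sort(Comparator.comparingInt((SimpleNode n) -> n.index));
            for (SimpleNode n : byIncreasingIndex) {
                if (n.retired) {
                    n.retired = false;
                    if (++delta == 0) break;
                }
            }
        }
    }
}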

Example 2 with Allocation

Use of com.yahoo.vespa.hosted.provision.node.Allocation in project vespa by vespa-engine.

From the class MetricsReporter, method updateNodeMetrics:

private void updateNodeMetrics(Node node, Map<HostName, List<ServiceInstance>> servicesByHost) {
    Metric.Context context;
    Optional<Allocation> allocation = node.allocation();
    if (allocation.isPresent()) {
        ApplicationId applicationId = allocation.get().owner();
        context = getContextAt("state", node.state().name(), "host", node.hostname(), "tenantName", applicationId.tenant().value(), "applicationId", applicationId.serializedForm().replace(':', '.'), "app", toApp(applicationId), "clustertype", allocation.get().membership().cluster().type().name(), "clusterid", allocation.get().membership().cluster().id().value());
        long wantedRestartGeneration = allocation.get().restartGeneration().wanted();
        metric.set("wantedRestartGeneration", wantedRestartGeneration, context);
        long currentRestartGeneration = allocation.get().restartGeneration().current();
        metric.set("currentRestartGeneration", currentRestartGeneration, context);
        boolean wantToRestart = currentRestartGeneration < wantedRestartGeneration;
        metric.set("wantToRestart", wantToRestart ? 1 : 0, context);
        Version wantedVersion = allocation.get().membership().cluster().vespaVersion();
        double wantedVersionNumber = getVersionAsNumber(wantedVersion);
        metric.set("wantedVespaVersion", wantedVersionNumber, context);
        Optional<Version> currentVersion = node.status().vespaVersion();
        boolean converged = currentVersion.isPresent() && currentVersion.get().equals(wantedVersion);
        metric.set("wantToChangeVespaVersion", converged ? 0 : 1, context);
    } else {
        context = getContextAt("state", node.state().name(), "host", node.hostname());
    }
    Optional<Version> currentVersion = node.status().vespaVersion();
    // Node repo checks for !isEmpty(), so let's do that here too.
    if (currentVersion.isPresent() && !currentVersion.get().isEmpty()) {
        double currentVersionNumber = getVersionAsNumber(currentVersion.get());
        metric.set("currentVespaVersion", currentVersionNumber, context);
    }
    long wantedRebootGeneration = node.status().reboot().wanted();
    metric.set("wantedRebootGeneration", wantedRebootGeneration, context);
    long currentRebootGeneration = node.status().reboot().current();
    metric.set("currentRebootGeneration", currentRebootGeneration, context);
    boolean wantToReboot = currentRebootGeneration < wantedRebootGeneration;
    metric.set("wantToReboot", wantToReboot ? 1 : 0, context);
    metric.set("wantToRetire", node.status().wantToRetire() ? 1 : 0, context);
    metric.set("wantToDeprovision", node.status().wantToDeprovision() ? 1 : 0, context);
    metric.set("hardwareFailure", node.status().hardwareFailureDescription().isPresent() ? 1 : 0, context);
    metric.set("hardwareDivergence", node.status().hardwareDivergence().isPresent() ? 1 : 0, context);
    try {
        HostStatus status = orchestrator.getNodeStatus(new HostName(node.hostname()));
        boolean allowedToBeDown = status == HostStatus.ALLOWED_TO_BE_DOWN;
        metric.set("allowedToBeDown", allowedToBeDown ? 1 : 0, context);
    } catch (HostNameNotFoundException e) {
        // Host is not known to the orchestrator; skip the allowedToBeDown metric
    }
    long numberOfServices;
    HostName hostName = new HostName(node.hostname());
    List<ServiceInstance> services = servicesByHost.get(hostName);
    if (services == null) {
        numberOfServices = 0;
    } else {
        Map<ServiceStatus, Long> servicesCount = services.stream().collect(Collectors.groupingBy(ServiceInstance::serviceStatus, Collectors.counting()));
        numberOfServices = servicesCount.values().stream().mapToLong(Long::longValue).sum();
        metric.set("numberOfServicesUp", servicesCount.getOrDefault(ServiceStatus.UP, 0L), context);
        metric.set("numberOfServicesNotChecked", servicesCount.getOrDefault(ServiceStatus.NOT_CHECKED, 0L), context);
        long numberOfServicesDown = servicesCount.getOrDefault(ServiceStatus.DOWN, 0L);
        metric.set("numberOfServicesDown", numberOfServicesDown, context);
        metric.set("someServicesDown", (numberOfServicesDown > 0 ? 1 : 0), context);
        boolean badNode = NodeFailer.badNode(services);
        metric.set("nodeFailerBadNode", (badNode ? 1 : 0), context);
        boolean nodeDownInNodeRepo = node.history().event(History.Event.Type.down).isPresent();
        metric.set("downInNodeRepo", (nodeDownInNodeRepo ? 1 : 0), context);
    }
    metric.set("numberOfServices", numberOfServices, context);
}
Also used : ServiceInstance(com.yahoo.vespa.applicationmodel.ServiceInstance) Allocation(com.yahoo.vespa.hosted.provision.node.Allocation) Version(com.yahoo.component.Version) ServiceStatus(com.yahoo.vespa.applicationmodel.ServiceStatus) Metric(com.yahoo.jdisc.Metric) HostStatus(com.yahoo.vespa.orchestrator.status.HostStatus) ApplicationId(com.yahoo.config.provision.ApplicationId) HostName(com.yahoo.vespa.applicationmodel.HostName) HostNameNotFoundException(com.yahoo.vespa.orchestrator.HostNameNotFoundException)
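
The updateNodeMetrics example tags every metric with a context whose dimensions depend on whether the node is allocated. The sketch below shows just that gating pattern with plain maps; dimensionsFor and its parameters are hypothetical, and the real reporter builds a Metric.Context with many more dimensions.

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

public class NodeDimensionSketch {

    /** Build metric dimensions for a node; allocation-specific dimensions are added only when present. */
    static Map<String, String> dimensionsFor(String state, String hostname,
                                             Optional<String> owningApplication) {
        Map<String, String> dimensions = new HashMap<>();
        dimensions.put("state", state);
        dimensions.put("host", hostname);
        // Mirrors the allocation.isPresent() branch above: unallocated nodes only get state and host.
        owningApplication.ifPresent(app -> dimensions.put("applicationId", app.replace(':', '.')));
        return dimensions;
    }

    public static void main(String[] args) {
        System.out.println(dimensionsFor("active", "node1.example.com", Optional.of("tenant:app:default")));
        System.out.println(dimensionsFor("ready", "node2.example.com", Optional.empty()));
    }
}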

Example 3 with Allocation

Use of com.yahoo.vespa.hosted.provision.node.Allocation in project vespa by vespa-engine.

From the class IdentityDocumentGeneratorTest, method generates_valid_identity_document:

@Test
public void generates_valid_identity_document() throws Exception {
    String hostname = "x.y.com";
    ApplicationId appid = ApplicationId.from(TenantName.from("tenant"), ApplicationName.from("application"), InstanceName.from("default"));
    Allocation allocation = new Allocation(appid, ClusterMembership.from("container/default/0/0", Version.fromString("1.2.3")), Generation.inital(), false);
    Node n = Node.create("ostkid", ImmutableSet.of("127.0.0.1"), new HashSet<>(), hostname, Optional.empty(), new MockNodeFlavors().getFlavorOrThrow("default"), NodeType.tenant).with(allocation);
    NodeRepository nodeRepository = mock(NodeRepository.class);
    when(nodeRepository.getNode(eq(hostname))).thenReturn(Optional.of(n));
    AutoGeneratedKeyProvider keyProvider = new AutoGeneratedKeyProvider();
    String dnsSuffix = "vespa.dns.suffix";
    AthenzProviderServiceConfig config = getAthenzProviderConfig("domain", "service", dnsSuffix, ZONE);
    IdentityDocumentGenerator identityDocumentGenerator = new IdentityDocumentGenerator(config, nodeRepository, ZONE, keyProvider);
    SignedIdentityDocument signedIdentityDocument = identityDocumentGenerator.generateSignedIdentityDocument(hostname);
    // Verify attributes
    assertEquals(hostname, signedIdentityDocument.identityDocument.instanceHostname);
    String environment = "dev";
    String region = "us-north-1";
    String expectedZoneDnsSuffix = environment + "-" + region + "." + dnsSuffix;
    assertEquals(expectedZoneDnsSuffix, signedIdentityDocument.dnsSuffix);
    ProviderUniqueId expectedProviderUniqueId = new ProviderUniqueId("tenant", "application", environment, region, "default", "default", 0);
    assertEquals(expectedProviderUniqueId, signedIdentityDocument.identityDocument.providerUniqueId);
    // Validate signature
    assertTrue("Message", InstanceValidator.isSignatureValid(keyProvider.getPublicKey(0), signedIdentityDocument.rawIdentityDocument, signedIdentityDocument.signature));
}
Also used : MockNodeFlavors(com.yahoo.vespa.hosted.provision.testutils.MockNodeFlavors) Allocation(com.yahoo.vespa.hosted.provision.node.Allocation) Node(com.yahoo.vespa.hosted.provision.Node) NodeRepository(com.yahoo.vespa.hosted.provision.NodeRepository) AutoGeneratedKeyProvider(com.yahoo.vespa.hosted.athenz.instanceproviderservice.AutoGeneratedKeyProvider) AthenzProviderServiceConfig(com.yahoo.vespa.hosted.athenz.instanceproviderservice.config.AthenzProviderServiceConfig) ApplicationId(com.yahoo.config.provision.ApplicationId) HashSet(java.util.HashSet) Test(org.junit.Test)
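
The test builds its Allocation from ClusterMembership.from("container/default/0/0", ...), where the string appears to encode cluster type, cluster id, group, and node index. The sketch below parses such a string under that assumption; it is an illustration only, not Vespa's ClusterMembership parser.

public class MembershipStringSketch {

    // Hypothetical holder for the parsed parts.
    record Membership(String clusterType, String clusterId, int group, int index) { }

    static Membership parse(String spec) {
        String[] parts = spec.split("/");
        if (parts.length < 4)
            throw new IllegalArgumentException("expected clusterType/clusterId/group/index: " + spec);
        return new Membership(parts[0], parts[1], Integer.parseInt(parts[2]), Integer.parseInt(parts[3]));
    }

    public static void main(String[] args) {
        // Prints Membership[clusterType=container, clusterId=default, group=0, index=0]
        System.out.println(parse("container/default/0/0"));
    }
}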

Example 4 with Allocation

Use of com.yahoo.vespa.hosted.provision.node.Allocation in project vespa by vespa-engine.

From the class IdentityDocumentGenerator, method generateIdDocument:

private IdentityDocument generateIdDocument(Node node) {
    Allocation allocation = node.allocation().orElseThrow(() -> new RuntimeException("No allocation for node " + node.hostname()));
    ProviderUniqueId providerUniqueId = new ProviderUniqueId(allocation.owner().tenant().value(), allocation.owner().application().value(), zone.environment().value(), zone.region().value(), allocation.owner().instance().value(), allocation.membership().cluster().id().value(), allocation.membership().index());
    return new IdentityDocument(providerUniqueId,
                                // TODO: Add configserver hostname
                                "localhost",
                                node.hostname(),
                                Instant.now());
}
Also used : Allocation(com.yahoo.vespa.hosted.provision.node.Allocation)
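
generateIdDocument combines the allocation's owner (tenant, application, instance) and membership (cluster id, index) with the zone's environment and region into a ProviderUniqueId. The sketch below shows that composition as a simple string join; the field order follows the constructor call above, but the join format itself is a hypothetical serialization, not Vespa's actual encoding.

public class ProviderUniqueIdSketch {

    static String uniqueId(String tenant, String application, String environment,
                           String region, String instance, String clusterId, int clusterIndex) {
        // Joining with '.' is a hypothetical serialization; the real ProviderUniqueId
        // class defines its own fields and representation.
        return String.join(".", tenant, application, environment, region,
                           instance, clusterId, Integer.toString(clusterIndex));
    }

    public static void main(String[] args) {
        // Uses the same values asserted in the test in Example 3.
        System.out.println(uniqueId("tenant", "application", "dev", "us-north-1", "default", "default", 0));
    }
}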

Aggregations

Allocation (com.yahoo.vespa.hosted.provision.node.Allocation): 4 examples
ApplicationId (com.yahoo.config.provision.ApplicationId): 3 examples
Node (com.yahoo.vespa.hosted.provision.Node): 2 examples
NodeRepository (com.yahoo.vespa.hosted.provision.NodeRepository): 2 examples
HashSet (java.util.HashSet): 2 examples
Version (com.yahoo.component.Version): 1 example
ClusterMembership (com.yahoo.config.provision.ClusterMembership): 1 example
ClusterSpec (com.yahoo.config.provision.ClusterSpec): 1 example
TenantName (com.yahoo.config.provision.TenantName): 1 example
Metric (com.yahoo.jdisc.Metric): 1 example
MutableInteger (com.yahoo.lang.MutableInteger): 1 example
HostName (com.yahoo.vespa.applicationmodel.HostName): 1 example
ServiceInstance (com.yahoo.vespa.applicationmodel.ServiceInstance): 1 example
ServiceStatus (com.yahoo.vespa.applicationmodel.ServiceStatus): 1 example
AutoGeneratedKeyProvider (com.yahoo.vespa.hosted.athenz.instanceproviderservice.AutoGeneratedKeyProvider): 1 example
AthenzProviderServiceConfig (com.yahoo.vespa.hosted.athenz.instanceproviderservice.config.AthenzProviderServiceConfig): 1 example
Agent (com.yahoo.vespa.hosted.provision.node.Agent): 1 example
MockNodeFlavors (com.yahoo.vespa.hosted.provision.testutils.MockNodeFlavors): 1 example
HostNameNotFoundException (com.yahoo.vespa.orchestrator.HostNameNotFoundException): 1 example
HostStatus (com.yahoo.vespa.orchestrator.status.HostStatus): 1 example