Example usage of com.yahoo.config.provision.Zone in the vespa project (vespa-engine), from class ModelGeneratorTest, method toApplicationModelWithConfigServerApplication:
@Test
public void toApplicationModelWithConfigServerApplication() throws Exception {
    // Super model with one example application; the generator adds the synthetic config server app.
    SuperModel superModel = ExampleModel.createExampleSuperModelWithOneRpcPort(HOSTNAME, PORT);
    ModelGenerator generator = new ModelGenerator();
    Zone zone = new Zone(Environment.from(ENVIRONMENT), RegionName.from(REGION));
    List<String> configServerHosts = Stream.of("cfg1", "cfg2", "cfg3").collect(Collectors.toList());

    // Every service reports UP through the mocked slobrok monitor.
    SlobrokMonitorManagerImpl slobrok = mock(SlobrokMonitorManagerImpl.class);
    when(slobrok.getStatus(any(), any(), any(), any())).thenReturn(ServiceStatus.UP);

    ServiceModel serviceModel = generator.toServiceModel(superModel, zone, configServerHosts, slobrok);
    Map<ApplicationInstanceReference, ApplicationInstance> instancesByReference = serviceModel.getAllApplicationInstances();
    assertEquals(2, instancesByReference.size());

    // Map iteration order is unspecified, so determine which entry is the config server app before verifying.
    Iterator<Map.Entry<ApplicationInstanceReference, ApplicationInstance>> entries = instancesByReference.entrySet().iterator();
    ApplicationInstance first = entries.next().getValue();
    ApplicationInstance second = entries.next().getValue();
    boolean firstIsConfigServer = first.applicationInstanceId().equals(ConfigServerApplication.APPLICATION_INSTANCE_ID);
    verifyConfigServerApplication(firstIsConfigServer ? first : second);
    verifyOtherApplication(firstIsConfigServer ? second : first);
}
Example usage of com.yahoo.config.provision.Zone in the vespa project (vespa-engine), from class SuperModelListenerImplTest, method sanityCheck:
@Test
public void sanityCheck() {
    // Wire the listener with mocked collaborators so only its own orchestration is under test.
    SlobrokMonitorManagerImpl slobrokMonitorManager = mock(SlobrokMonitorManagerImpl.class);
    ServiceMonitorMetrics metrics = mock(ServiceMonitorMetrics.class);
    ModelGenerator modelGenerator = mock(ModelGenerator.class);
    Zone zone = mock(Zone.class);
    List<String> configServers = new ArrayList<>();
    SuperModelListenerImpl listener = new SuperModelListenerImpl(slobrokMonitorManager, metrics, modelGenerator, zone, configServers);

    // The provider hands the listener a snapshot containing two applications.
    SuperModelProvider superModelProvider = mock(SuperModelProvider.class);
    SuperModel superModel = mock(SuperModel.class);
    when(superModelProvider.snapshot(listener)).thenReturn(superModel);
    ApplicationInfo application1 = mock(ApplicationInfo.class);
    ApplicationInfo application2 = mock(ApplicationInfo.class);
    List<ApplicationInfo> applications = Stream.of(application1, application2).collect(Collectors.toList());
    when(superModel.getAllApplicationInfos()).thenReturn(applications);

    // Starting the listener must forward every application in the snapshot to the slobrok monitor.
    listener.start(superModelProvider);
    verify(slobrokMonitorManager).applicationActivated(superModel, application1);
    verify(slobrokMonitorManager).applicationActivated(superModel, application2);

    // get() builds the service model; verify it delegates to the generator with the wired collaborators.
    // The return value itself is not asserted on (it was previously assigned to an unused local).
    listener.get();
    verify(modelGenerator).toServiceModel(superModel, zone, configServers, slobrokMonitorManager);
}
Example usage of com.yahoo.config.provision.Zone in the vespa project (vespa-engine), from class InactiveAndFailedExpirerTest, method inactive_and_failed_times_out:
// Walks two allocated nodes through the full lifecycle: active -> inactive -> dirty,
// then one back to ready and the other (via the dirty expirer) to failed.
// The exact statement order matters: each expirer run depends on the clock advance before it.
@Test
public void inactive_and_failed_times_out() {
ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east")));
List<Node> nodes = tester.makeReadyNodes(2, "default");
// Allocate then deallocate 2 nodes
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false);
List<HostSpec> preparedNodes = tester.prepare(applicationId, cluster, Capacity.fromNodeCount(2), 1);
tester.activate(applicationId, new HashSet<>(preparedNodes));
assertEquals(2, tester.getNodes(applicationId, Node.State.active).size());
// Deactivating moves both nodes from active to inactive.
tester.deactivate(applicationId);
List<Node> inactiveNodes = tester.getNodes(applicationId, Node.State.inactive).asList();
assertEquals(2, inactiveNodes.size());
// Inactive times out: advance past the 10-minute expiry, then run the expirer.
tester.advanceTime(Duration.ofMinutes(14));
new InactiveExpirer(tester.nodeRepository(), tester.clock(), Duration.ofMinutes(10), new JobControl(tester.nodeRepository().database())).run();
assertEquals(0, tester.nodeRepository().getNodes(Node.State.inactive).size());
// Expiry moves the nodes to dirty and clears their allocation.
List<Node> dirty = tester.nodeRepository().getNodes(Node.State.dirty);
assertEquals(2, dirty.size());
assertFalse(dirty.get(0).allocation().isPresent());
assertFalse(dirty.get(1).allocation().isPresent());
// One node is set back to ready
Node ready = tester.nodeRepository().setReady(Collections.singletonList(dirty.get(0)), Agent.system, getClass().getSimpleName()).get(0);
// Readying resets the history to just the provisioned + readied events.
assertEquals("Allocated history is removed on readying", Arrays.asList(History.Event.Type.provisioned, History.Event.Type.readied), ready.history().events().stream().map(History.Event::type).collect(Collectors.toList()));
// Dirty times out for the other one: same 14-minute advance against a 10-minute expiry.
tester.advanceTime(Duration.ofMinutes(14));
new DirtyExpirer(tester.nodeRepository(), tester.clock(), Duration.ofMinutes(10), new JobControl(tester.nodeRepository().database())).run();
assertEquals(0, tester.nodeRepository().getNodes(NodeType.tenant, Node.State.dirty).size());
// The expired dirty node ends up failed, with its fail count bumped to 1.
List<Node> failed = tester.nodeRepository().getNodes(NodeType.tenant, Node.State.failed);
assertEquals(1, failed.size());
assertEquals(1, failed.get(0).status().failCount());
}
Example usage of com.yahoo.config.provision.Zone in the vespa project (vespa-engine), from class DynamicDockerProvisioningTest, method relocate_nodes_from_headroom_hosts:
/**
 * Test relocation of nodes that violate headroom.
 * <p>
 * Setup 4 docker hosts and allocate one container on each (from two different applications)
 * No spares - only headroom (4xd-2)
 * <p>
 * One application is now violating headroom and need relocation
 * <p>
 * Initial allocation of app 1 and 2 --> final allocation (headroom marked as H):
 * <p>
 * | H | H | H | H | | | | | |
 * | H | H | H1a | H1b | --> | | | | |
 * | | | 2a | 2b | | 1a | 1b | 2a | 2b |
 */
@Test
public void relocate_nodes_from_headroom_hosts() {
    // perf environment with headroom-enabled flavors; 4 docker hosts with capacity 32 each.
    ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.perf, RegionName.from("us-east")), flavorsConfig(true));
    tester.makeReadyNodes(4, "host-small", NodeType.host, 32);
    deployZoneApp(tester);
    List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
    Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");

    // Application 1: both nodes initially on hosts 2 and 3 (the headroom hosts).
    ApplicationId application1 = makeApplicationId("t1", "a1");
    ClusterSpec clusterSpec1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    addAndAssignNode(application1, "1a", dockerHosts.get(2).hostname(), flavor, 0, tester);
    addAndAssignNode(application1, "1b", dockerHosts.get(3).hostname(), flavor, 1, tester);

    // Application 2: also placed on hosts 2 and 3. (Its ClusterSpec was never used here,
    // so the dead local that previously constructed one has been removed.)
    ApplicationId application2 = makeApplicationId("t2", "a2");
    addAndAssignNode(application2, "2a", dockerHosts.get(2).hostname(), flavor, 0, tester);
    addAndAssignNode(application2, "2b", dockerHosts.get(3).hostname(), flavor, 1, tester);

    // Redeploy one of the applications; app 1 should relocate off the headroom hosts.
    deployapp(application1, clusterSpec1, flavor, tester, 2);

    // Assert that the nodes are spread across all hosts (to allow headroom)
    Set<String> hostsWithChildren = new HashSet<>();
    for (Node node : tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active)) {
        if (!isInactiveOrRetired(node)) {
            hostsWithChildren.add(node.parentHostname().get());
        }
    }
    Assert.assertEquals(4, hostsWithChildren.size());
}
Example usage of com.yahoo.config.provision.Zone in the vespa project (vespa-engine), from class DynamicDockerProvisioningTest, method new_docker_nodes_are_marked_as_headroom_violations:
/**
 * Verifies that new docker nodes which would cause headroom violations are marked as such.
 * <p>
 * Redeploying app 1 should change nothing (moving app 1 onto hosts 0 and 1 would violate headroom),
 * while the subsequent redeploy of app 2 should relocate it onto hosts 0 and 1.
 * <p>
 * | H | H | H2a | H2b | | H | H | H | H |
 * | H | H | H1a | H1b | --> | H | H | H1a | H1b |
 * | | | 1a | 1b | | 2a | 2b | 1a | 1b |
 */
@Test
public void new_docker_nodes_are_marked_as_headroom_violations() {
    // perf environment with headroom-enabled flavors; 4 docker hosts with capacity 32 each.
    ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.perf, RegionName.from("us-east")), flavorsConfig(true));
    tester.makeReadyNodes(4, "host-small", NodeType.host, 32);
    deployZoneApp(tester);
    List<Node> hosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
    Flavor largeFlavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-2");
    Flavor smallFlavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");
    String parent2 = hosts.get(2).hostname();
    String parent3 = hosts.get(3).hostname();

    // Application 1: two d-2 nodes, one on each of hosts 2 and 3.
    ApplicationId application1 = makeApplicationId("t1", "1");
    ClusterSpec clusterSpec1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    addAndAssignNode(application1, "1a", parent2, largeFlavor, 0, tester);
    addAndAssignNode(application1, "1b", parent3, largeFlavor, 1, tester);

    // Application 2: two d-1 nodes on the same two hosts.
    ApplicationId application2 = makeApplicationId("t2", "2");
    ClusterSpec clusterSpec2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"), false);
    addAndAssignNode(application2, "2a", parent2, smallFlavor, 0, tester);
    addAndAssignNode(application2, "2b", parent3, smallFlavor, 1, tester);

    // Before any redeploy, both applications sit on hosts 2 and 3.
    assertApplicationHosts(tester.nodeRepository().getNodes(application1), parent2, parent3);
    assertApplicationHosts(tester.nodeRepository().getNodes(application2), parent2, parent3);

    // Redeploying application 1 must leave both placements unchanged.
    deployapp(application1, clusterSpec1, largeFlavor, tester, 2);
    assertApplicationHosts(tester.nodeRepository().getNodes(application1), parent2, parent3);
    assertApplicationHosts(tester.nodeRepository().getNodes(application2), parent2, parent3);

    // Redeploying application 2 relocates it onto hosts 0 and 1; application 1 stays put.
    deployapp(application2, clusterSpec2, smallFlavor, tester, 2);
    assertApplicationHosts(tester.nodeRepository().getNodes(application1), parent2, parent3);
    assertApplicationHosts(tester.nodeRepository().getNodes(application2), hosts.get(0).hostname(), hosts.get(1).hostname());
}
Aggregations