Use of com.cloud.offering.ServiceOffering in project cosmic by MissionCriticalCloud.
The class UpgradeVMCmdByAdmin, method execute.
@Override
public void execute() throws ResourceAllocationException {
    CallContext.current().setEventDetails("Vm Id: " + getId());
    final ServiceOffering serviceOffering = _entityMgr.findById(ServiceOffering.class, serviceOfferingId);
    if (serviceOffering == null) {
        throw new InvalidParameterValueException("Unable to find service offering: " + serviceOfferingId);
    }
    final UserVm result = _userVmService.upgradeVirtualMachine(this);
    if (result != null) {
        final UserVmResponse response = _responseGenerator.createUserVmResponse(ResponseView.Full, "virtualmachine", result).get(0);
        response.setResponseName(getCommandName());
        setResponseObject(response);
    } else {
        throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to upgrade vm");
    }
}
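The execute method above first resolves the service offering and rejects the request if it cannot be found, then delegates to the service layer and wraps the result in a response. A minimal sketch of that lookup-and-validate step as a reusable helper; the requireServiceOffering name is illustrative only and does not exist in the cosmic sources:

// Hypothetical helper (name is illustrative only): wraps the findById + null-check
// pattern used by the command's execute method above.
private ServiceOffering requireServiceOffering(final Long offeringId) {
    final ServiceOffering offering = _entityMgr.findById(ServiceOffering.class, offeringId);
    if (offering == null) {
        throw new InvalidParameterValueException("Unable to find service offering: " + offeringId);
    }
    return offering;
}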
Use of com.cloud.offering.ServiceOffering in project cosmic by MissionCriticalCloud.
The class UpgradeSystemVMCmd, method execute.
@Override
public void execute() {
    CallContext.current().setEventDetails("Vm Id: " + getId());
    final ServiceOffering serviceOffering = _entityMgr.findById(ServiceOffering.class, serviceOfferingId);
    if (serviceOffering == null) {
        throw new InvalidParameterValueException("Unable to find service offering: " + serviceOfferingId);
    }
    final VirtualMachine result = _mgr.upgradeSystemVM(this);
    if (result != null) {
        final SystemVmResponse response = _responseGenerator.createSystemVmResponse(result);
        response.setResponseName(getCommandName());
        setResponseObject(response);
    } else {
        throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to upgrade system vm");
    }
}
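Both command methods above fail fast with InvalidParameterValueException when the offering id does not resolve. A hedged unit-test sketch of that error path follows; it assumes the command's _entityMgr collaborator can be injected in a test (for example via Mockito's @InjectMocks) and that a CallContext is registered, as the test fixture later in this section does. None of these test and mock names come from the original projects.

// Hypothetical test sketch; the entityManager/upgradeSystemVMCmd names and the
// field-injection setup are assumptions, not code from the cosmic project.
@Test(expected = InvalidParameterValueException.class)
public void executeShouldRejectUnknownServiceOffering() {
    CallContext.register(Mockito.mock(User.class), Mockito.mock(Account.class));
    Mockito.when(entityManager.findById(Mockito.eq(ServiceOffering.class), Mockito.anyLong())).thenReturn(null);
    upgradeSystemVMCmd.execute();
}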
Use of com.cloud.offering.ServiceOffering in project cosmic by MissionCriticalCloud.
The class UpdateServiceOfferingCmd, method execute.
// ///////////////////////////////////////////////////
// ///////////// API Implementation///////////////////
// ///////////////////////////////////////////////////
@Override
public void execute() {
    // Note
    // Once an offering is created, we cannot update the domainId field (keeping consistent with zones logic)
    final ServiceOffering result = _configService.updateServiceOffering(this);
    if (result != null) {
        final ServiceOfferingResponse response = _responseGenerator.createServiceOfferingResponse(result);
        response.setResponseName(getCommandName());
        this.setResponseObject(response);
    } else {
        throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update service offering");
    }
}
Use of com.cloud.offering.ServiceOffering in project cloudstack by apache.
The class UnmanagedVMsManagerImplTest, method setUp.
@Before
public void setUp() throws Exception {
    MockitoAnnotations.initMocks(this);
    AccountVO account = new AccountVO("admin", 1L, "", Account.ACCOUNT_TYPE_ADMIN, "uuid");
    UserVO user = new UserVO(1, "adminuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN);
    CallContext.register(user, account);
    UnmanagedInstanceTO instance = new UnmanagedInstanceTO();
    instance.setName("TestInstance");
    instance.setCpuCores(2);
    instance.setCpuCoresPerSocket(1);
    instance.setCpuSpeed(1000);
    instance.setMemory(1024);
    instance.setOperatingSystem("CentOS 7");
    List<UnmanagedInstanceTO.Disk> instanceDisks = new ArrayList<>();
    UnmanagedInstanceTO.Disk instanceDisk = new UnmanagedInstanceTO.Disk();
    instanceDisk.setDiskId("1000-1");
    instanceDisk.setLabel("DiskLabel");
    instanceDisk.setController("scsi");
    instanceDisk.setImagePath("[b6ccf44a1fa13e29b3667b4954fa10ee] TestInstance/ROOT-1.vmdk");
    instanceDisk.setCapacity(5242880L);
    instanceDisk.setDatastoreName("Test");
    instanceDisk.setDatastoreHost("Test");
    instanceDisk.setDatastorePath("Test");
    instanceDisk.setDatastoreType("NFS");
    instanceDisks.add(instanceDisk);
    instance.setDisks(instanceDisks);
    List<UnmanagedInstanceTO.Nic> instanceNics = new ArrayList<>();
    UnmanagedInstanceTO.Nic instanceNic = new UnmanagedInstanceTO.Nic();
    instanceNic.setNicId("NIC 1");
    instanceNic.setAdapterType("VirtualE1000E");
    instanceNic.setMacAddress("02:00:2e:0f:00:02");
    instanceNic.setVlan(1024);
    instanceNics.add(instanceNic);
    instance.setNics(instanceNics);
    instance.setPowerState(UnmanagedInstanceTO.PowerState.PowerOn);
    ClusterVO clusterVO = new ClusterVO(1L, 1L, "Cluster");
    clusterVO.setHypervisorType(Hypervisor.HypervisorType.VMware.toString());
    when(clusterDao.findById(Mockito.anyLong())).thenReturn(clusterVO);
    when(configurationDao.getValue(Mockito.anyString())).thenReturn(null);
    doNothing().when(resourceLimitService).checkResourceLimit(any(Account.class), any(Resource.ResourceType.class), anyLong());
    List<HostVO> hosts = new ArrayList<>();
    HostVO hostVO = Mockito.mock(HostVO.class);
    when(hostVO.isInMaintenanceStates()).thenReturn(false);
    hosts.add(hostVO);
    when(hostVO.checkHostServiceOfferingTags(Mockito.any())).thenReturn(true);
    when(resourceManager.listHostsInClusterByStatus(Mockito.anyLong(), Mockito.any(Status.class))).thenReturn(hosts);
    List<VMTemplateStoragePoolVO> templates = new ArrayList<>();
    when(templatePoolDao.listAll()).thenReturn(templates);
    List<VolumeVO> volumes = new ArrayList<>();
    when(volumeDao.findIncludingRemovedByZone(Mockito.anyLong())).thenReturn(volumes);
    List<VMInstanceVO> vms = new ArrayList<>();
    when(vmDao.listByHostId(Mockito.anyLong())).thenReturn(vms);
    when(vmDao.listByLastHostIdAndStates(Mockito.anyLong())).thenReturn(vms);
    GetUnmanagedInstancesCommand cmd = Mockito.mock(GetUnmanagedInstancesCommand.class);
    HashMap<String, UnmanagedInstanceTO> map = new HashMap<>();
    map.put(instance.getName(), instance);
    Answer answer = new GetUnmanagedInstancesAnswer(cmd, "", map);
    when(agentManager.easySend(Mockito.anyLong(), Mockito.any(GetUnmanagedInstancesCommand.class))).thenReturn(answer);
    DataCenterVO zone = Mockito.mock(DataCenterVO.class);
    when(zone.getId()).thenReturn(1L);
    when(dataCenterDao.findById(Mockito.anyLong())).thenReturn(zone);
    when(accountService.getActiveAccountById(Mockito.anyLong())).thenReturn(Mockito.mock(Account.class));
    List<UserVO> users = new ArrayList<>();
    users.add(Mockito.mock(UserVO.class));
    when(userDao.listByAccount(Mockito.anyLong())).thenReturn(users);
    VMTemplateVO template = Mockito.mock(VMTemplateVO.class);
    when(template.getId()).thenReturn(1L);
    when(template.getName()).thenReturn("Template");
    when(templateDao.findById(Mockito.anyLong())).thenReturn(template);
    when(templateDao.findByName(Mockito.anyString())).thenReturn(template);
    ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class);
    when(serviceOffering.getId()).thenReturn(1L);
    when(serviceOffering.isDynamic()).thenReturn(false);
    when(serviceOffering.getCpu()).thenReturn(instance.getCpuCores());
    when(serviceOffering.getRamSize()).thenReturn(instance.getMemory());
    when(serviceOffering.getSpeed()).thenReturn(instance.getCpuSpeed());
    when(serviceOfferingDao.findById(Mockito.anyLong())).thenReturn(serviceOffering);
    DiskOfferingVO diskOfferingVO = Mockito.mock(DiskOfferingVO.class);
    when(diskOfferingVO.getTags()).thenReturn("");
    when(diskOfferingVO.isCustomized()).thenReturn(false);
    when(diskOfferingVO.getDiskSize()).thenReturn(Long.MAX_VALUE);
    when(diskOfferingDao.findById(Mockito.anyLong())).thenReturn(diskOfferingVO);
    UserVmVO userVm = Mockito.mock(UserVmVO.class);
    when(userVm.getAccountId()).thenReturn(1L);
    when(userVm.getDataCenterId()).thenReturn(1L);
    when(userVm.getHostName()).thenReturn(instance.getName());
    when(userVm.getTemplateId()).thenReturn(1L);
    when(userVm.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.VMware);
    when(userVm.getUuid()).thenReturn("abcd");
    when(userVm.isDisplayVm()).thenReturn(true);
    // Skip usage publishing and resource increment for test
    when(userVm.getType()).thenReturn(VirtualMachine.Type.Instance);
    userVm.setInstanceName(instance.getName());
    userVm.setHostName(instance.getName());
    StoragePoolVO poolVO = Mockito.mock(StoragePoolVO.class);
    when(poolVO.getDataCenterId()).thenReturn(1L);
    when(poolVO.getClusterId()).thenReturn(clusterVO.getId());
    List<StoragePoolVO> pools = new ArrayList<>();
    pools.add(poolVO);
    when(primaryDataStoreDao.listPoolByHostPath(Mockito.anyString(), Mockito.anyString())).thenReturn(pools);
    when(userVmManager.importVM(nullable(DataCenter.class), nullable(Host.class), nullable(VirtualMachineTemplate.class), nullable(String.class), nullable(String.class), nullable(Account.class), nullable(String.class), nullable(Account.class), nullable(Boolean.class), nullable(String.class), nullable(Long.class), nullable(Long.class), nullable(ServiceOffering.class), nullable(String.class), nullable(String.class), nullable(Hypervisor.HypervisorType.class), nullable(Map.class), nullable(VirtualMachine.PowerState.class))).thenReturn(userVm);
    when(volumeApiService.doesTargetStorageSupportDiskOffering(Mockito.any(StoragePool.class), Mockito.anyString())).thenReturn(true);
    NetworkVO networkVO = Mockito.mock(NetworkVO.class);
    when(networkVO.getGuestType()).thenReturn(Network.GuestType.L2);
    when(networkVO.getBroadcastUri()).thenReturn(URI.create(String.format("vlan://%d", instanceNic.getVlan())));
    when(networkVO.getDataCenterId()).thenReturn(1L);
    when(networkDao.findById(Mockito.anyLong())).thenReturn(networkVO);
    List<NetworkVO> networks = new ArrayList<>();
    networks.add(networkVO);
    when(networkDao.listByZone(Mockito.anyLong())).thenReturn(networks);
    doNothing().when(networkModel).checkNetworkPermissions(Mockito.any(Account.class), Mockito.any(Network.class));
    doNothing().when(networkModel).checkRequestedIpAddresses(Mockito.anyLong(), Mockito.any(Network.IpAddresses.class));
    NicProfile profile = Mockito.mock(NicProfile.class);
    Integer deviceId = 100;
    Pair<NicProfile, Integer> pair = new Pair<NicProfile, Integer>(profile, deviceId);
    when(networkOrchestrationService.importNic(nullable(String.class), nullable(Integer.class), nullable(Network.class), nullable(Boolean.class), nullable(VirtualMachine.class), nullable(Network.IpAddresses.class), anyBoolean())).thenReturn(pair);
    when(volumeManager.importVolume(Mockito.any(Volume.Type.class), Mockito.anyString(), Mockito.any(DiskOffering.class), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.any(VirtualMachine.class), Mockito.any(VirtualMachineTemplate.class), Mockito.any(Account.class), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyString(), Mockito.anyString())).thenReturn(Mockito.mock(DiskProfile.class));
    when(volumeDao.findByInstance(Mockito.anyLong())).thenReturn(volumes);
    List<UserVmResponse> userVmResponses = new ArrayList<>();
    UserVmResponse userVmResponse = new UserVmResponse();
    userVmResponse.setInstanceName(instance.getName());
    userVmResponses.add(userVmResponse);
    when(responseGenerator.createUserVmResponse(Mockito.any(ResponseObject.ResponseView.class), Mockito.anyString(), Mockito.any(UserVm.class))).thenReturn(userVmResponses);
    when(vmDao.findById(virtualMachineId)).thenReturn(virtualMachine);
    when(virtualMachine.getState()).thenReturn(VirtualMachine.State.Running);
    when(virtualMachine.getInstanceName()).thenReturn("i-2-7-VM");
    when(virtualMachine.getId()).thenReturn(virtualMachineId);
    VolumeVO volumeVO = mock(VolumeVO.class);
    when(volumeDao.findByInstance(virtualMachineId)).thenReturn(Collections.singletonList(volumeVO));
    when(volumeVO.getInstanceId()).thenReturn(virtualMachineId);
    when(volumeVO.getId()).thenReturn(virtualMachineId);
    when(nicDao.listByVmId(virtualMachineId)).thenReturn(Collections.singletonList(nicVO));
    when(nicVO.getNetworkId()).thenReturn(1L);
    when(networkDao.findById(1L)).thenReturn(networkVO);
}
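The fixture above stubs the cluster, host, agent answer and service offering so that a single unmanaged VMware instance named "TestInstance" is discoverable. A hedged sketch of a test method that could sit on top of it; the listUnmanagedInstances call, the ListUnmanagedInstancesCmd mock and the response types are assumptions about the manager's API rather than code taken from UnmanagedVMsManagerImplTest:

// Hypothetical test sketch; unmanagedVMsManager, the command mock and the
// response types are assumed names, not verified against the real test class.
@Test
public void listUnmanagedInstancesShouldReturnTheDiscoveredVm() {
    final ListUnmanagedInstancesCmd cmd = Mockito.mock(ListUnmanagedInstancesCmd.class);
    Mockito.when(cmd.getClusterId()).thenReturn(1L);
    final ListResponse<UnmanagedInstanceResponse> response = unmanagedVMsManager.listUnmanagedInstances(cmd);
    Assert.assertEquals(1, response.getResponses().size());
}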
Use of com.cloud.offering.ServiceOffering in project cloudstack by apache.
The class DeploymentPlanningManagerImpl, method planDeployment.
@Override
public DeployDestination planDeployment(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids, DeploymentPlanner planner) throws InsufficientServerCapacityException, AffinityConflictException {
    ServiceOffering offering = vmProfile.getServiceOffering();
    int cpu_requested = offering.getCpu() * offering.getSpeed();
    long ram_requested = offering.getRamSize() * 1024L * 1024L;
    VirtualMachine vm = vmProfile.getVirtualMachine();
    DataCenter dc = _dcDao.findById(vm.getDataCenterId());
    if (vm.getType() == VirtualMachine.Type.User || vm.getType() == VirtualMachine.Type.DomainRouter) {
        checkForNonDedicatedResources(vmProfile, dc, avoids);
    }
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("DeploymentPlanner allocation algorithm: " + planner);
        s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() + ", requested cpu: " + cpu_requested + ", requested ram: " + toHumanReadableSize(ram_requested));
        s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId() != null ? "Yes" : "No"));
    }
    avoidDisabledResources(vmProfile, dc, avoids);
    String haVmTag = (String) vmProfile.getParameter(VirtualMachineProfile.Param.HaTag);
    String uefiFlag = (String) vmProfile.getParameter(VirtualMachineProfile.Param.UefiFlag);
    if (plan.getHostId() != null && haVmTag == null) {
        Long hostIdSpecified = plan.getHostId();
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: " + hostIdSpecified);
        }
        HostVO host = _hostDao.findById(hostIdSpecified);
        if (host != null && StringUtils.isNotBlank(uefiFlag) && "yes".equalsIgnoreCase(uefiFlag)) {
            _hostDao.loadDetails(host);
            if (MapUtils.isNotEmpty(host.getDetails()) && host.getDetails().containsKey(Host.HOST_UEFI_ENABLE) && "false".equalsIgnoreCase(host.getDetails().get(Host.HOST_UEFI_ENABLE))) {
                s_logger.debug("Cannot deploy to specified host as host doesn't support uefi vm deployment, returning.");
                return null;
            }
        }
        if (host == null) {
            s_logger.debug("The specified host cannot be found");
        } else if (avoids.shouldAvoid(host)) {
            s_logger.debug("The specified host is in avoid set");
        } else {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Looking for suitable pools for this host under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
            }
            Pod pod = _podDao.findById(host.getPodId());
            Cluster cluster = _clusterDao.findById(host.getClusterId());
            boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile);
            if (vm.getHypervisorType() == HypervisorType.BareMetal) {
                DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>(), displayStorage);
                s_logger.debug("Returning Deployment Destination: " + dest);
                return dest;
            }
            // search for storage under the zone, pod, cluster of the host.
            DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, plan.getReservationContext());
            Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
            Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
            List<Volume> readyAndReusedVolumes = result.second();
            // choose the potential pool for this VM for this host
            if (!suitableVolumeStoragePools.isEmpty()) {
                List<Host> suitableHosts = new ArrayList<Host>();
                suitableHosts.add(host);
                Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes, plan.getPreferredHosts(), vm);
                if (potentialResources != null) {
                    pod = _podDao.findById(host.getPodId());
                    cluster = _clusterDao.findById(host.getClusterId());
                    Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                    // we don't have to prepare this volume.
                    for (Volume vol : readyAndReusedVolumes) {
                        storageVolMap.remove(vol);
                    }
                    DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap, displayStorage);
                    s_logger.debug("Returning Deployment Destination: " + dest);
                    return dest;
                }
            }
        }
        s_logger.debug("Cannot deploy to specified host, returning.");
        return null;
    }
    // call affinitygroup chain
    long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId());
    if (vmGroupCount > 0) {
        for (AffinityGroupProcessor processor : _affinityProcessors) {
            processor.process(vmProfile, plan, avoids);
        }
    }
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid());
    }
    // check if datacenter is in avoid set
    if (avoids.shouldAvoid(dc)) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning.");
        }
        return null;
    }
    if (planner == null) {
        String plannerName = offering.getDeploymentPlanner();
        if (plannerName == null) {
            if (vm.getHypervisorType() == HypervisorType.BareMetal) {
                plannerName = "BareMetalPlanner";
            } else {
                plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key());
            }
        }
        planner = getDeploymentPlannerByName(plannerName);
    }
    if (vm.getLastHostId() != null && haVmTag == null) {
        s_logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId());
        HostVO host = _hostDao.findById(vm.getLastHostId());
        ServiceOfferingDetailsVO offeringDetails = null;
        if (host == null) {
            s_logger.debug("The last host of this VM cannot be found");
        } else if (avoids.shouldAvoid(host)) {
            s_logger.debug("The last host of this VM is in avoid set");
        } else if (plan.getClusterId() != null && host.getClusterId() != null && !plan.getClusterId().equals(host.getClusterId())) {
            s_logger.debug("The last host of this VM cannot be picked as the plan specifies different clusterId: " + plan.getClusterId());
        } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) {
            s_logger.debug("The last Host, hostId: " + host.getId() + " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
        } else if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) {
            ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString());
            if (!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())) {
                s_logger.debug("The last host of this VM does not have required GPU devices available");
            }
        } else {
            if (host.getStatus() == Status.Up) {
                if (checkVmProfileAndHost(vmProfile, host)) {
                    long cluster_id = host.getClusterId();
                    ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
                    ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
                    Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
                    Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
                    boolean hostHasCpuCapability, hostHasCapacity = false;
                    hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed());
                    if (hostHasCpuCapability) {
                        // first check from reserved capacity
                        hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true);
                        // if not reserved, check the free capacity
                        if (!hostHasCapacity)
                            hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true);
                    }
                    boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile);
                    if (hostHasCapacity && hostHasCpuCapability) {
                        s_logger.debug("The last host of this VM is UP and has enough capacity");
                        s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
                        Pod pod = _podDao.findById(host.getPodId());
                        Cluster cluster = _clusterDao.findById(host.getClusterId());
                        if (vm.getHypervisorType() == HypervisorType.BareMetal) {
                            DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>(), displayStorage);
                            s_logger.debug("Returning Deployment Destination: " + dest);
                            return dest;
                        }
                        // search for storage under the zone, pod, cluster of the last host.
                        DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
                        Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
                        Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
                        List<Volume> readyAndReusedVolumes = result.second();
                        // choose the potential pool for this VM for this host
                        if (!suitableVolumeStoragePools.isEmpty()) {
                            List<Host> suitableHosts = new ArrayList<Host>();
                            suitableHosts.add(host);
                            Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes, plan.getPreferredHosts(), vm);
                            if (potentialResources != null) {
                                Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                                // we don't have to prepare this volume.
                                for (Volume vol : readyAndReusedVolumes) {
                                    storageVolMap.remove(vol);
                                }
                                DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap, displayStorage);
                                s_logger.debug("Returning Deployment Destination: " + dest);
                                return dest;
                            }
                        }
                    } else {
                        s_logger.debug("The last host of this VM does not have enough capacity");
                    }
                }
            } else {
                s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " + host.getResourceState());
            }
        }
        s_logger.debug("Cannot choose the last host to deploy this VM ");
    }
    DeployDestination dest = null;
    List<Long> clusterList = null;
    if (planner != null && planner.canHandle(vmProfile, plan, avoids)) {
        while (true) {
            if (planner instanceof DeploymentClusterPlanner) {
                ExcludeList plannerAvoidInput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid());
                clusterList = ((DeploymentClusterPlanner) planner).orderClusters(vmProfile, plan, avoids);
                if (clusterList != null && !clusterList.isEmpty()) {
                    // planner refactoring. call allocators to list hosts
                    ExcludeList plannerAvoidOutput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid());
                    resetAvoidSet(plannerAvoidOutput, plannerAvoidInput);
                    dest = checkClustersforDestination(clusterList, vmProfile, plan, avoids, dc, getPlannerUsage(planner, vmProfile, plan, avoids), plannerAvoidOutput);
                    if (dest != null) {
                        return dest;
                    }
                    // reset the avoid input to the planners
                    resetAvoidSet(avoids, plannerAvoidOutput);
                } else {
                    return null;
                }
            } else {
                dest = planner.plan(vmProfile, plan, avoids);
                if (dest != null) {
                    long hostId = dest.getHost().getId();
                    avoids.addHost(dest.getHost().getId());
                    if (checkIfHostFitsPlannerUsage(hostId, DeploymentPlanner.PlannerResourceUsage.Shared)) {
                        // found destination
                        return dest;
                    } else {
                        // deployment picked it up for dedicated access
                        continue;
                    }
                } else {
                    return null;
                }
            }
        }
    }
    return dest;
}
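planDeployment derives the requested CPU (cores × speed, in MHz) and RAM (offering size in MB converted to bytes) from the service offering, then asks the capacity manager whether a host can still fit them under the cluster's cpuOvercommitRatio and memoryOvercommitRatio. The following self-contained sketch only illustrates that comparison; the helper, its parameters and the sample values are assumptions, and the real check lives inside _capacityMgr.checkIfHostHasCapacity.

// Illustrative-only sketch of the capacity comparison delegated to
// _capacityMgr.checkIfHostHasCapacity; the helper itself is not CloudStack code.
static boolean requestFitsHost(final int offeringCpuCores, final int offeringCpuSpeedMhz, final int offeringRamMb,
                               final long hostCpuMhz, final long hostRamBytes,
                               final float cpuOvercommitRatio, final float memoryOvercommitRatio) {
    final int cpuRequested = offeringCpuCores * offeringCpuSpeedMhz;   // MHz, as computed in planDeployment
    final long ramRequested = offeringRamMb * 1024L * 1024L;           // MB -> bytes, as computed in planDeployment
    final boolean cpuFits = cpuRequested <= (long) (hostCpuMhz * cpuOvercommitRatio);
    final boolean ramFits = ramRequested <= (long) (hostRamBytes * memoryOvercommitRatio);
    return cpuFits && ramFits;
}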