Use of com.cloud.offering.DiskOfferingInfo in project cloudstack by apache.
In class CloudOrchestrator, method createVirtualMachineFromScratch:
@Override
public VirtualMachineEntity createVirtualMachineFromScratch(String id, String owner, String isoId, String hostName, String displayName, String hypervisor, String os, int cpu, int speed, long memory, Long diskSize, List<String> computeTags, List<String> rootDiskTags, Map<String, NicProfile> networkNicMap, DeploymentPlan plan) throws InsufficientCapacityException {
    // VirtualMachineEntityImpl vmEntity = new VirtualMachineEntityImpl(id, owner, hostName, displayName, cpu, speed, memory, computeTags, rootDiskTags, networks, vmEntityManager);
    VirtualMachineEntityImpl vmEntity = ComponentContext.inject(VirtualMachineEntityImpl.class);
    vmEntity.init(id, owner, hostName, displayName, cpu, speed, memory, computeTags, rootDiskTags, new ArrayList<String>(networkNicMap.keySet()));

    // Load the VM instance and its offerings, then delegate to VirtualMachineManagerImpl.
    VMInstanceVO vm = _vmDao.findByUuid(id);
    ServiceOfferingVO computeOffering = _serviceOfferingDao.findById(vm.getId(), vm.getServiceOfferingId());

    DiskOfferingInfo rootDiskOfferingInfo = new DiskOfferingInfo();
    // Seed with the compute offering; it is replaced below by the explicit disk offering.
    rootDiskOfferingInfo.setDiskOffering(computeOffering);

    // Installing from an ISO requires an explicit disk offering for the root disk.
    Long diskOfferingId = vm.getDiskOfferingId();
    if (diskOfferingId == null) {
        throw new InvalidParameterValueException("Installing from ISO requires a disk offering to be specified for the root disk.");
    }
    DiskOfferingVO diskOffering = _diskOfferingDao.findById(diskOfferingId);
    if (diskOffering == null) {
        throw new InvalidParameterValueException("Unable to find disk offering " + diskOfferingId);
    }

    // A disk size of 0 on the offering means the size is supplied by the caller (in GB).
    Long size = null;
    if (diskOffering.getDiskSize() == 0) {
        size = diskSize;
        if (size == null) {
            throw new InvalidParameterValueException("Disk offering " + diskOffering + " requires size parameter.");
        }
        _volumeMgr.validateVolumeSizeRange(size * 1024 * 1024 * 1024);
    }
    rootDiskOfferingInfo.setDiskOffering(diskOffering);
    rootDiskOfferingInfo.setSize(size);

    // Custom IOPS for the root disk come from the "minIopsDo"/"maxIopsDo" VM details.
    if (diskOffering.isCustomizedIops() != null && diskOffering.isCustomizedIops()) {
        Map<String, String> userVmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId());
        if (userVmDetails != null) {
            String minIops = userVmDetails.get("minIopsDo");
            String maxIops = userVmDetails.get("maxIopsDo");
            rootDiskOfferingInfo.setMinIops(minIops != null && minIops.trim().length() > 0 ? Long.parseLong(minIops) : null);
            rootDiskOfferingInfo.setMaxIops(maxIops != null && maxIops.trim().length() > 0 ? Long.parseLong(maxIops) : null);
        }
    }

    // Resolve network UUIDs to NetworkVO instances for the allocation call.
    LinkedHashMap<Network, List<? extends NicProfile>> networkIpMap = new LinkedHashMap<Network, List<? extends NicProfile>>();
    for (String uuid : networkNicMap.keySet()) {
        NetworkVO network = _networkDao.findByUuid(uuid);
        if (network != null) {
            networkIpMap.put(network, new ArrayList<NicProfile>(Arrays.asList(networkNicMap.get(uuid))));
        }
    }

    HypervisorType hypervisorType = HypervisorType.valueOf(hypervisor);
    _itMgr.allocate(vm.getInstanceName(), _templateDao.findById(new Long(isoId)), computeOffering, rootDiskOfferingInfo, new ArrayList<DiskOfferingInfo>(), networkIpMap, plan, hypervisorType);
    return vmEntity;
}
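For reference, a minimal sketch of the same root-disk packaging as a standalone helper. Only the DiskOfferingInfo setters and the DiskOfferingVO accessors are taken from the code above; the helper name, its parameters, and its placement are illustrative assumptions, not part of CloudStack.

import com.cloud.offering.DiskOfferingInfo;
import com.cloud.storage.DiskOfferingVO;

// Hypothetical helper mirroring the root-disk setup in createVirtualMachineFromScratch.
private DiskOfferingInfo buildRootDiskInfo(DiskOfferingVO diskOffering, Long requestedSizeGb, Long minIops, Long maxIops) {
    DiskOfferingInfo info = new DiskOfferingInfo();
    info.setDiskOffering(diskOffering);
    // A fixed-size offering carries its own size; a custom offering (disk size 0) takes the caller's value in GB.
    if (diskOffering.getDiskSize() == 0) {
        info.setSize(requestedSizeGb);
    }
    // IOPS are only set when the offering allows customized IOPS.
    if (diskOffering.isCustomizedIops() != null && diskOffering.isCustomizedIops()) {
        info.setMinIops(minIops);
        info.setMaxIops(maxIops);
    }
    return info;
}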
Use of com.cloud.offering.DiskOfferingInfo in project cloudstack by apache.
In class VirtualMachineManagerImpl, method allocate:
@Override
@DB
public void allocate(final String vmInstanceName, final VirtualMachineTemplate template, final ServiceOffering serviceOffering, final DiskOfferingInfo rootDiskOfferingInfo, final List<DiskOfferingInfo> dataDiskOfferings, final LinkedHashMap<? extends Network, List<? extends NicProfile>> auxiliaryNetworks, final DeploymentPlan plan, final HypervisorType hyperType) throws InsufficientCapacityException {
    final VMInstanceVO vm = _vmDao.findVMByInstanceName(vmInstanceName);
    final Account owner = _entityMgr.findById(Account.class, vm.getAccountId());
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Allocating entries for VM: " + vm);
    }
    vm.setDataCenterId(plan.getDataCenterId());
    if (plan.getPodId() != null) {
        vm.setPodIdToDeployIn(plan.getPodId());
    }
    assert plan.getClusterId() == null && plan.getPoolId() == null : "We currently don't support cluster and pool preset yet";
    final VMInstanceVO vmFinal = _vmDao.persist(vm);
    final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmFinal, template, serviceOffering, null, null);

    // NIC and volume allocation happen together in a single database transaction.
    Transaction.execute(new TransactionCallbackWithExceptionNoReturn<InsufficientCapacityException>() {

        @Override
        public void doInTransactionWithoutResult(final TransactionStatus status) throws InsufficientCapacityException {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Allocating nics for " + vmFinal);
            }
            try {
                _networkMgr.allocate(vmProfile, auxiliaryNetworks);
            } catch (final ConcurrentOperationException e) {
                throw new CloudRuntimeException("Concurrent operation while trying to allocate resources for the VM", e);
            }
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Allocating disks for " + vmFinal);
            }
            // ISO deployments get a raw ROOT volume, baremetal gets none, and everything else gets a templated ROOT volume.
            if (template.getFormat() == ImageFormat.ISO) {
                volumeMgr.allocateRawVolume(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(), rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vmFinal, template, owner);
            } else if (template.getFormat() == ImageFormat.BAREMETAL) {
                // Do nothing
            } else {
                volumeMgr.allocateTemplatedVolume(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(), rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), template, vmFinal, owner);
            }
            // Each data disk is described by its own DiskOfferingInfo.
            if (dataDiskOfferings != null) {
                for (final DiskOfferingInfo dataDiskOfferingInfo : dataDiskOfferings) {
                    volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vmFinal.getId(), dataDiskOfferingInfo.getDiskOffering(), dataDiskOfferingInfo.getSize(), dataDiskOfferingInfo.getMinIops(), dataDiskOfferingInfo.getMaxIops(), vmFinal, template, owner);
                }
            }
        }
    });
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Allocation completed for VM: " + vmFinal);
    }
}
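A usage sketch of this allocate() signature from a caller that also wants one data disk. The identifiers vm, computeOffering, isoTemplate, rootOffering, dataOffering, networkIpMap and plan are assumed to be resolved elsewhere; only the allocate() parameter list and the DiskOfferingInfo setters come from the code above.

import java.util.ArrayList;
import java.util.List;

import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.offering.DiskOfferingInfo;

// Hypothetical caller: root disk plus one data disk, each described by a DiskOfferingInfo.
DiskOfferingInfo rootDisk = new DiskOfferingInfo();
rootDisk.setDiskOffering(rootOffering);     // assumed DiskOfferingVO for the root disk
rootDisk.setSize(50L);                      // 50 GB

DiskOfferingInfo dataDisk = new DiskOfferingInfo();
dataDisk.setDiskOffering(dataOffering);     // assumed DiskOfferingVO for the data disk
dataDisk.setSize(200L);
dataDisk.setMinIops(500L);
dataDisk.setMaxIops(1000L);

List<DiskOfferingInfo> dataDisks = new ArrayList<DiskOfferingInfo>();
dataDisks.add(dataDisk);

_itMgr.allocate(vm.getInstanceName(), isoTemplate, computeOffering, rootDisk, dataDisks, networkIpMap, plan, HypervisorType.KVM);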
Use of com.cloud.offering.DiskOfferingInfo in project cloudstack by apache.
In class CloudOrchestrator, method createVirtualMachine:
@Override
public VirtualMachineEntity createVirtualMachine(String id, String owner, String templateId, String hostName, String displayName, String hypervisor, int cpu, int speed, long memory, Long diskSize, List<String> computeTags, List<String> rootDiskTags, Map<String, NicProfile> networkNicMap, DeploymentPlan plan, Long rootDiskSize) throws InsufficientCapacityException {
    // VirtualMachineEntityImpl vmEntity = new VirtualMachineEntityImpl(id, owner, hostName, displayName, cpu, speed, memory, computeTags, rootDiskTags, networks, vmEntityManager);
    LinkedHashMap<NetworkVO, List<? extends NicProfile>> networkIpMap = new LinkedHashMap<NetworkVO, List<? extends NicProfile>>();
    for (String uuid : networkNicMap.keySet()) {
        NetworkVO network = _networkDao.findByUuid(uuid);
        if (network != null) {
            networkIpMap.put(network, new ArrayList<NicProfile>(Arrays.asList(networkNicMap.get(uuid))));
        }
    }
    VirtualMachineEntityImpl vmEntity = ComponentContext.inject(VirtualMachineEntityImpl.class);
    vmEntity.init(id, owner, hostName, displayName, cpu, speed, memory, computeTags, rootDiskTags, new ArrayList<String>(networkNicMap.keySet()));
    HypervisorType hypervisorType = HypervisorType.valueOf(hypervisor);

    // load vm instance and offerings and call virtualMachineManagerImpl
    VMInstanceVO vm = _vmDao.findByUuid(id);

    // If the template represents an ISO, a disk offering must be passed in, and will be used to create the root disk
    // Else, a disk offering is optional, and if present will be used to create the data disk
    DiskOfferingInfo rootDiskOfferingInfo = new DiskOfferingInfo();
    List<DiskOfferingInfo> dataDiskOfferings = new ArrayList<DiskOfferingInfo>();

    ServiceOfferingVO computeOffering = _serviceOfferingDao.findById(vm.getId(), vm.getServiceOfferingId());
    rootDiskOfferingInfo.setDiskOffering(computeOffering);
    rootDiskOfferingInfo.setSize(rootDiskSize);
    if (computeOffering.isCustomizedIops() != null && computeOffering.isCustomizedIops()) {
        Map<String, String> userVmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId());
        if (userVmDetails != null) {
            String minIops = userVmDetails.get("minIops");
            String maxIops = userVmDetails.get("maxIops");
            rootDiskOfferingInfo.setMinIops(minIops != null && minIops.trim().length() > 0 ? Long.parseLong(minIops) : null);
            rootDiskOfferingInfo.setMaxIops(maxIops != null && maxIops.trim().length() > 0 ? Long.parseLong(maxIops) : null);
        }
    }

    if (vm.getDiskOfferingId() != null) {
        DiskOfferingVO diskOffering = _diskOfferingDao.findById(vm.getDiskOfferingId());
        if (diskOffering == null) {
            throw new InvalidParameterValueException("Unable to find disk offering " + vm.getDiskOfferingId());
        }
        Long size = null;
        if (diskOffering.getDiskSize() == 0) {
            size = diskSize;
            if (size == null) {
                throw new InvalidParameterValueException("Disk offering " + diskOffering + " requires size parameter.");
            }
            _volumeMgr.validateVolumeSizeRange(size * 1024 * 1024 * 1024);
        }
        DiskOfferingInfo dataDiskOfferingInfo = new DiskOfferingInfo();
        dataDiskOfferingInfo.setDiskOffering(diskOffering);
        dataDiskOfferingInfo.setSize(size);
        if (diskOffering.isCustomizedIops() != null && diskOffering.isCustomizedIops()) {
            Map<String, String> userVmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId());
            if (userVmDetails != null) {
                String minIops = userVmDetails.get("minIopsDo");
                String maxIops = userVmDetails.get("maxIopsDo");
                dataDiskOfferingInfo.setMinIops(minIops != null && minIops.trim().length() > 0 ? Long.parseLong(minIops) : null);
                dataDiskOfferingInfo.setMaxIops(maxIops != null && maxIops.trim().length() > 0 ? Long.parseLong(maxIops) : null);
            }
        }
        dataDiskOfferings.add(dataDiskOfferingInfo);
    }

    _itMgr.allocate(vm.getInstanceName(), _templateDao.findById(new Long(templateId)), computeOffering, rootDiskOfferingInfo, dataDiskOfferings, networkIpMap, plan, hypervisorType);
    return vmEntity;
}
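Note the detail-key convention shared by the two orchestrator methods: IOPS for the compute offering's root disk are read from the "minIops"/"maxIops" VM details, while IOPS for an explicit disk offering (the root disk in the ISO path, the data disk here) come from "minIopsDo"/"maxIopsDo". A minimal sketch of that repeated parsing pattern; the helper name is hypothetical, while the keys and the trim/parse logic are taken from the code above.

import java.util.Map;

// Hypothetical helper for the IOPS-detail parsing repeated in both orchestrator methods.
private Long parseIopsDetail(Map<String, String> userVmDetails, String key) {
    if (userVmDetails == null) {
        return null;
    }
    String value = userVmDetails.get(key);
    return value != null && value.trim().length() > 0 ? Long.parseLong(value) : null;
}

// Usage, mirroring the code above:
// rootDiskOfferingInfo.setMinIops(parseIopsDetail(userVmDetails, "minIops"));     // compute offering keys
// dataDiskOfferingInfo.setMinIops(parseIopsDetail(userVmDetails, "minIopsDo"));   // disk offering keys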