Use of com.cloud.hypervisor.Hypervisor.HypervisorType in project cloudstack by apache.
The class VmwareResource, method examineStorageSubSystemCommandFullCloneFlagForVmware.
/**
* Examine StorageSubSystem command to get full clone flag, if provided
* @param cmd command to execute
* @param params params
* @return copy of params including new values, if suitable
*/
protected EnumMap<VmwareStorageProcessorConfigurableFields, Object> examineStorageSubSystemCommandFullCloneFlagForVmware(CopyCommand cmd, EnumMap<VmwareStorageProcessorConfigurableFields, Object> params) {
EnumMap<VmwareStorageProcessorConfigurableFields, Object> paramsCopy = new EnumMap<VmwareStorageProcessorConfigurableFields, Object>(params);
HypervisorType hypervisor = cmd.getDestTO().getHypervisorType();
if (hypervisor != null && hypervisor.equals(HypervisorType.VMware)) {
DataStoreTO destDataStore = cmd.getDestTO().getDataStore();
if (destDataStore instanceof PrimaryDataStoreTO) {
PrimaryDataStoreTO dest = (PrimaryDataStoreTO) destDataStore;
if (dest.isFullCloneFlag() != null) {
paramsCopy.put(VmwareStorageProcessorConfigurableFields.FULL_CLONE_FLAG, dest.isFullCloneFlag().booleanValue());
}
}
}
return paramsCopy;
}
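The method above never mutates the caller's map: it copies the incoming EnumMap and only applies the full-clone override when the destination primary store actually carries a flag. A minimal, self-contained sketch of that copy-then-override pattern, using a hypothetical ConfigField enum in place of CloudStack's VmwareStorageProcessorConfigurableFields and a plain Boolean in place of the destination data store:

import java.util.EnumMap;

public class FullCloneFlagSketch {

    // Hypothetical stand-in for VmwareStorageProcessorConfigurableFields.
    enum ConfigField {
        FULL_CLONE_FLAG, DISK_CONTROLLER
    }

    // Returns a copy of params with FULL_CLONE_FLAG set when a flag was provided;
    // the caller's map is left untouched.
    static EnumMap<ConfigField, Object> withFullCloneFlag(EnumMap<ConfigField, Object> params, Boolean fullCloneFlag) {
        EnumMap<ConfigField, Object> copy = new EnumMap<>(params);
        if (fullCloneFlag != null) {
            copy.put(ConfigField.FULL_CLONE_FLAG, fullCloneFlag.booleanValue());
        }
        return copy;
    }

    public static void main(String[] args) {
        EnumMap<ConfigField, Object> params = new EnumMap<>(ConfigField.class);
        params.put(ConfigField.DISK_CONTROLLER, "osdefault");
        EnumMap<ConfigField, Object> updated = withFullCloneFlag(params, Boolean.TRUE);
        System.out.println(params);   // {DISK_CONTROLLER=osdefault}
        System.out.println(updated);  // {FULL_CLONE_FLAG=true, DISK_CONTROLLER=osdefault}
    }
}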
Use of com.cloud.hypervisor.Hypervisor.HypervisorType in project cloudstack by apache.
The class ConsoleProxyManagerImpl, method startNew.
public ConsoleProxyVO startNew(long dataCenterId) throws ConcurrentOperationException {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Assign console proxy from a newly started instance for request from data center : " + dataCenterId);
}
if (!allowToLaunchNew(dataCenterId)) {
s_logger.warn("The number of launched console proxy on zone " + dataCenterId + " has reached to limit");
return null;
}
VMTemplateVO template = null;
HypervisorType availableHypervisor = _resourceMgr.getAvailableHypervisor(dataCenterId);
template = _templateDao.findSystemVMReadyTemplate(dataCenterId, availableHypervisor);
if (template == null) {
throw new CloudRuntimeException("Not able to find the System templates or not downloaded in zone " + dataCenterId);
}
Map<String, Object> context = createProxyInstance(dataCenterId, template);
long proxyVmId = (Long) context.get("proxyVmId");
if (proxyVmId == 0) {
if (s_logger.isTraceEnabled()) {
s_logger.trace("Creating proxy instance failed, data center id : " + dataCenterId);
}
return null;
}
ConsoleProxyVO proxy = _consoleProxyDao.findById(proxyVmId);
if (proxy != null) {
SubscriptionMgr.getInstance().notifySubscribers(ConsoleProxyManager.ALERT_SUBJECT, this, new ConsoleProxyAlertEventArgs(ConsoleProxyAlertEventArgs.PROXY_CREATED, dataCenterId, proxy.getId(), proxy, null));
return proxy;
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Unable to allocate console proxy storage, remove the console proxy record from DB, proxy id: " + proxyVmId);
}
}
return null;
}
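A condensed sketch of the guard sequence above: resolve the zone's available hypervisor, look up a ready system VM template for it, and fail fast when none exists. The ZoneResources and TemplateLookup interfaces below are hypothetical stand-ins, not CloudStack's actual ResourceManager and VMTemplateDao signatures, and the local Hypervisor enum is a simplified substitute for HypervisorType:

public class SystemVmTemplateSketch {

    // Simplified stand-in for com.cloud.hypervisor.Hypervisor.HypervisorType.
    enum Hypervisor {
        KVM, XenServer, VMware, Any, None
    }

    // Hypothetical lookup interfaces; the real DAO and resource-manager methods differ.
    interface ZoneResources {
        Hypervisor availableHypervisor(long zoneId);
    }

    interface TemplateLookup {
        String findReadySystemVmTemplate(long zoneId, Hypervisor hypervisor);
    }

    static String pickSystemVmTemplate(long zoneId, ZoneResources resources, TemplateLookup templates) {
        Hypervisor hypervisor = resources.availableHypervisor(zoneId);
        String template = templates.findReadySystemVmTemplate(zoneId, hypervisor);
        if (template == null) {
            throw new IllegalStateException("No ready system VM template in zone " + zoneId + " for hypervisor " + hypervisor);
        }
        return template;
    }

    public static void main(String[] args) {
        ZoneResources resources = zoneId -> Hypervisor.KVM;
        TemplateLookup templates = (zoneId, hv) -> hv == Hypervisor.KVM ? "systemvm-kvm" : null;
        System.out.println(pickSystemVmTemplate(1L, resources, templates));
    }
}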
Use of com.cloud.hypervisor.Hypervisor.HypervisorType in project cloudstack by apache.
The class VolumeApiServiceImpl, method orchestrateAttachVolumeToVM.
private Volume orchestrateAttachVolumeToVM(Long vmId, Long volumeId, Long deviceId) {
VolumeInfo volumeToAttach = volFactory.getVolume(volumeId);
if (volumeToAttach.isAttachedVM()) {
throw new CloudRuntimeException("This volume is already attached to a VM.");
}
UserVmVO vm = _userVmDao.findById(vmId);
VolumeVO existingVolumeOfVm = null;
List<VolumeVO> rootVolumesOfVm = _volsDao.findByInstanceAndType(vmId, Volume.Type.ROOT);
if (rootVolumesOfVm.size() > 1) {
throw new CloudRuntimeException("The VM " + vm.getHostName() + " has more than one ROOT volume and is in an invalid state.");
} else {
if (!rootVolumesOfVm.isEmpty()) {
existingVolumeOfVm = rootVolumesOfVm.get(0);
} else {
// locate data volume of the vm
List<VolumeVO> diskVolumesOfVm = _volsDao.findByInstanceAndType(vmId, Volume.Type.DATADISK);
for (VolumeVO diskVolume : diskVolumesOfVm) {
if (diskVolume.getState() != Volume.State.Allocated) {
existingVolumeOfVm = diskVolume;
break;
}
}
}
}
HypervisorType rootDiskHyperType = vm.getHypervisorType();
HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId());
VolumeInfo newVolumeOnPrimaryStorage = volumeToAttach;
// don't create the volume on primary storage if it is being attached to a VM whose root volume hasn't been created yet
StoragePoolVO destPrimaryStorage = null;
if (existingVolumeOfVm != null && !existingVolumeOfVm.getState().equals(Volume.State.Allocated)) {
destPrimaryStorage = _storagePoolDao.findById(existingVolumeOfVm.getPoolId());
}
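// note: a volume in the Uploaded state still resides on secondary storage, so it too must be created on primary storage before the attach can proceed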
boolean volumeOnSecondary = volumeToAttach.getState() == Volume.State.Uploaded;
if (destPrimaryStorage != null && (volumeToAttach.getState() == Volume.State.Allocated || volumeOnSecondary)) {
try {
newVolumeOnPrimaryStorage = _volumeMgr.createVolumeOnPrimaryStorage(vm, volumeToAttach, rootDiskHyperType, destPrimaryStorage);
} catch (NoTransitionException e) {
s_logger.debug("Failed to create volume on primary storage", e);
throw new CloudRuntimeException("Failed to create volume on primary storage", e);
}
}
// reload the volume from db
newVolumeOnPrimaryStorage = volFactory.getVolume(newVolumeOnPrimaryStorage.getId());
boolean moveVolumeNeeded = needMoveVolume(existingVolumeOfVm, newVolumeOnPrimaryStorage);
if (moveVolumeNeeded) {
PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo) newVolumeOnPrimaryStorage.getDataStore();
if (primaryStore.isLocal()) {
throw new CloudRuntimeException("Failed to attach local data volume " + volumeToAttach.getName() + " to VM " + vm.getDisplayName() + " as migration of local data volume is not allowed");
}
StoragePoolVO vmRootVolumePool = _storagePoolDao.findById(existingVolumeOfVm.getPoolId());
try {
newVolumeOnPrimaryStorage = _volumeMgr.moveVolume(newVolumeOnPrimaryStorage, vmRootVolumePool.getDataCenterId(), vmRootVolumePool.getPodId(), vmRootVolumePool.getClusterId(), volumeToAttachHyperType);
} catch (ConcurrentOperationException e) {
s_logger.debug("move volume failed", e);
throw new CloudRuntimeException("move volume failed", e);
} catch (StorageUnavailableException e) {
s_logger.debug("move volume failed", e);
throw new CloudRuntimeException("move volume failed", e);
}
}
VolumeVO newVol = _volsDao.findById(newVolumeOnPrimaryStorage.getId());
// Getting the fresh vm object in case of volume migration to check the current state of VM
if (moveVolumeNeeded || volumeOnSecondary) {
vm = _userVmDao.findById(vmId);
if (vm == null) {
throw new InvalidParameterValueException("VM not found.");
}
}
newVol = sendAttachVolumeCommand(vm, newVol, deviceId);
return newVol;
}
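The method fetches both the VM's hypervisor type (rootDiskHyperType) and the volume's (volumeToAttachHyperType), and the latter is later handed to moveVolume. A natural companion guard, shown here only as a hedged sketch and not as CloudStack's exact validation, is to reject the attach when the two types conflict; HypervisorType.None marks a volume that has only ever been allocated in the database, so it is treated as compatible with any VM. The helper assumes the enclosing class's existing imports (HypervisorType and InvalidParameterValueException):

    // Hypothetical helper: refuse to attach a volume whose hypervisor type conflicts with the VM's.
    private static void checkHypervisorCompatibility(HypervisorType rootDiskHyperType, HypervisorType volumeToAttachHyperType) {
        if (volumeToAttachHyperType != HypervisorType.None && rootDiskHyperType != volumeToAttachHyperType) {
            throw new InvalidParameterValueException("Cannot attach a volume created for " + volumeToAttachHyperType + " to a VM running on " + rootDiskHyperType);
        }
    }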
Use of com.cloud.hypervisor.Hypervisor.HypervisorType in project cloudstack by apache.
The class VolumeApiServiceImpl, method resizeVolume.
@Override
@DB
@ActionEvent(eventType = EventTypes.EVENT_VOLUME_RESIZE, eventDescription = "resizing volume", async = true)
public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationException {
Long newSize = null;
Long newMinIops = null;
Long newMaxIops = null;
Integer newHypervisorSnapshotReserve = null;
boolean shrinkOk = cmd.getShrinkOk();
VolumeVO volume = _volsDao.findById(cmd.getEntityId());
if (volume == null) {
throw new InvalidParameterValueException("No such volume");
}
/* Does the caller have authority to act on this volume? */
_accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, volume);
if (volume.getInstanceId() != null) {
// Check that Vm to which this volume is attached does not have VM Snapshots
if (_vmSnapshotDao.findByVm(volume.getInstanceId()).size() > 0) {
throw new InvalidParameterValueException("Volume cannot be resized which is attached to VM with VM Snapshots");
}
}
DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
DiskOfferingVO newDiskOffering = null;
if (cmd.getNewDiskOfferingId() != null && volume.getDiskOfferingId() != cmd.getNewDiskOfferingId()) {
newDiskOffering = _diskOfferingDao.findById(cmd.getNewDiskOfferingId());
}
/* Only works for KVM/XenServer/VMware (or "Any") for now, and volumes with 'None' since they're just allocated in DB */
HypervisorType hypervisorType = _volsDao.getHypervisorType(volume.getId());
if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.XenServer && hypervisorType != HypervisorType.VMware && hypervisorType != HypervisorType.Any && hypervisorType != HypervisorType.None) {
throw new InvalidParameterValueException("Hypervisor " + hypervisorType + " does not support rootdisksize override");
}
if (volume.getState() != Volume.State.Ready && volume.getState() != Volume.State.Allocated) {
throw new InvalidParameterValueException("Volume should be in ready or allocated state before attempting a resize. Volume " + volume.getUuid() + " is in state " + volume.getState() + ".");
}
// if we are to use the existing disk offering
if (newDiskOffering == null) {
newSize = cmd.getSize();
newHypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve();
// if the caller is looking to change the size of the volume
if (newSize != null) {
if (!diskOffering.isCustomized() && !volume.getVolumeType().equals(Volume.Type.ROOT)) {
throw new InvalidParameterValueException("To change a volume's size without providing a new disk offering, its current disk offering must be " + "customizable or it must be a root volume (if providing a disk offering, make sure it is different from the current disk offering).");
}
// convert the requested size from GiB to bytes
newSize = newSize << 30;
} else {
// no parameter provided; just use the original size of the volume
newSize = volume.getSize();
}
newMinIops = cmd.getMinIops();
if (newMinIops != null) {
if (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops()) {
throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Min IOPS' parameter.");
}
} else {
// no parameter provided; just use the original min IOPS of the volume
newMinIops = volume.getMinIops();
}
newMaxIops = cmd.getMaxIops();
if (newMaxIops != null) {
if (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops()) {
throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Max IOPS' parameter.");
}
} else {
// no parameter provided; just use the original max IOPS of the volume
newMaxIops = volume.getMaxIops();
}
validateIops(newMinIops, newMaxIops);
} else {
if (newDiskOffering.getRemoved() != null) {
throw new InvalidParameterValueException("Requested disk offering has been removed.");
}
if (!DiskOfferingVO.Type.Disk.equals(newDiskOffering.getType())) {
throw new InvalidParameterValueException("Requested disk offering type is invalid.");
}
if (diskOffering.getTags() != null) {
if (!StringUtils.areTagsEqual(diskOffering.getTags(), newDiskOffering.getTags())) {
throw new InvalidParameterValueException("The tags on the new and old disk offerings must match.");
}
} else if (newDiskOffering.getTags() != null) {
throw new InvalidParameterValueException("There are no tags on the current disk offering. The new disk offering needs to have no tags, as well.");
}
if (newDiskOffering.getDomainId() != null) {
// not a public offering; check access
_configMgr.checkDiskOfferingAccess(CallContext.current().getCallingAccount(), newDiskOffering);
}
if (newDiskOffering.isCustomized()) {
newSize = cmd.getSize();
if (newSize == null) {
throw new InvalidParameterValueException("The new disk offering requires that a size be specified.");
}
// convert the requested size from GiB to bytes
newSize = newSize << 30;
} else {
newSize = newDiskOffering.getDiskSize();
}
if (!volume.getSize().equals(newSize) && !volume.getVolumeType().equals(Volume.Type.DATADISK)) {
throw new InvalidParameterValueException("Only data volumes can be resized via a new disk offering.");
}
if (newDiskOffering.isCustomizedIops() != null && newDiskOffering.isCustomizedIops()) {
newMinIops = cmd.getMinIops() != null ? cmd.getMinIops() : volume.getMinIops();
newMaxIops = cmd.getMaxIops() != null ? cmd.getMaxIops() : volume.getMaxIops();
validateIops(newMinIops, newMaxIops);
} else {
newMinIops = newDiskOffering.getMinIops();
newMaxIops = newDiskOffering.getMaxIops();
}
// if the hypervisor snapshot reserve value is null, it must remain null (currently only KVM uses null and null is all KVM uses for a value here)
newHypervisorSnapshotReserve = volume.getHypervisorSnapshotReserve() != null ? newDiskOffering.getHypervisorSnapshotReserve() : null;
}
long currentSize = volume.getSize();
// if the caller is looking to change the size of the volume
if (currentSize != newSize) {
if (!validateVolumeSizeRange(newSize)) {
throw new InvalidParameterValueException("Requested size out of range");
}
/*
* Let's make certain they (think they) know what they're doing if they
* want to shrink by forcing them to provide the shrinkok parameter.
* This will be checked again at the hypervisor level where we can see
* the actual disk size.
*/
if (currentSize > newSize && !shrinkOk) {
throw new InvalidParameterValueException("Going from existing size of " + currentSize + " to size of " + newSize + " would shrink the volume." + "Need to sign off by supplying the shrinkok parameter with value of true.");
}
if (newSize > currentSize) {
/* Check resource limit for this account on primary storage resource */
_resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(volume.getAccountId()), ResourceType.primary_storage, volume.isDisplayVolume(), new Long(newSize - currentSize).longValue());
}
}
/* If this volume has never been beyond allocated state, short circuit everything and simply update the database. */
if (volume.getState() == Volume.State.Allocated) {
s_logger.debug("Volume is in the allocated state, but has never been created. Simply updating database with new size and IOPS.");
volume.setSize(newSize);
volume.setMinIops(newMinIops);
volume.setMaxIops(newMaxIops);
volume.setHypervisorSnapshotReserve(newHypervisorSnapshotReserve);
if (newDiskOffering != null) {
volume.setDiskOfferingId(cmd.getNewDiskOfferingId());
}
_volsDao.update(volume.getId(), volume);
return volume;
}
UserVmVO userVm = _userVmDao.findById(volume.getInstanceId());
if (userVm != null) {
if (volume.getVolumeType().equals(Volume.Type.ROOT) && userVm.getPowerState() != VirtualMachine.PowerState.PowerOff && hypervisorType == HypervisorType.VMware) {
s_logger.error(" For ROOT volume resize VM should be in Power Off state.");
throw new InvalidParameterValueException("VM current state is : " + userVm.getPowerState() + ". But VM should be in " + VirtualMachine.PowerState.PowerOff + " state.");
}
// serialize VM operation
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
VmWorkJobVO placeHolder = null;
placeHolder = createPlaceHolderWork(userVm.getId());
try {
return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
} finally {
_workJobDao.expunge(placeHolder.getId());
}
} else {
Outcome<Volume> outcome = resizeVolumeThroughJobQueue(userVm.getId(), volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
try {
outcome.get();
} catch (InterruptedException e) {
throw new RuntimeException("Operation was interrupted", e);
} catch (java.util.concurrent.ExecutionException e) {
throw new RuntimeException("Execution exception", e);
}
Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
if (jobResult != null) {
if (jobResult instanceof ConcurrentOperationException) {
throw (ConcurrentOperationException) jobResult;
} else if (jobResult instanceof ResourceAllocationException) {
throw (ResourceAllocationException) jobResult;
} else if (jobResult instanceof RuntimeException) {
throw (RuntimeException) jobResult;
} else if (jobResult instanceof Throwable) {
throw new RuntimeException("Unexpected exception", (Throwable) jobResult);
} else if (jobResult instanceof Long) {
return _volsDao.findById((Long) jobResult);
}
}
return volume;
}
}
return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
}
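The supported-hypervisor check near the top of resizeVolume is a chain of five != comparisons. An equivalent and slightly more readable form keeps the allowed types in an EnumSet; this is only a sketch of an alternative, not the project's code, and it assumes the class's HypervisorType import plus java.util.EnumSet and java.util.Set:

    private static final Set<HypervisorType> RESIZE_SUPPORTED_HYPERVISORS =
            EnumSet.of(HypervisorType.KVM, HypervisorType.XenServer, HypervisorType.VMware, HypervisorType.Any, HypervisorType.None);

    // inside resizeVolume, replacing the chained comparisons:
    if (!RESIZE_SUPPORTED_HYPERVISORS.contains(hypervisorType)) {
        throw new InvalidParameterValueException("Hypervisor " + hypervisorType + " does not support volume resize");
    }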
Use of com.cloud.hypervisor.Hypervisor.HypervisorType in project cloudstack by apache.
The class SnapshotManagerImpl, method allocSnapshot.
@Override
public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, Snapshot.LocationType locationType) throws ResourceAllocationException {
Account caller = CallContext.current().getCallingAccount();
VolumeInfo volume = volFactory.getVolume(volumeId);
supportedByHypervisor(volume);
// Verify permissions
_accountMgr.checkAccess(caller, null, true, volume);
Type snapshotType = getSnapshotType(policyId);
Account owner = _accountMgr.getAccount(volume.getAccountId());
try {
_resourceLimitMgr.checkResourceLimit(owner, ResourceType.snapshot);
_resourceLimitMgr.checkResourceLimit(owner, ResourceType.secondary_storage, new Long(volume.getSize()).longValue());
} catch (ResourceAllocationException e) {
if (snapshotType != Type.MANUAL) {
String msg = "Snapshot resource limit exceeded for account id : " + owner.getId() + ". Failed to create recurring snapshots";
s_logger.warn(msg);
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_UPDATE_RESOURCE_COUNT, 0L, 0L, msg, "Snapshot resource limit exceeded for account id : " + owner.getId() + ". Failed to create recurring snapshots; please use updateResourceLimit to increase the limit");
}
throw e;
}
// Determine the name for this snapshot
// Snapshot Name: VMInstancename + volumeName + timeString
String timeString = DateUtil.getDateDisplayString(DateUtil.GMT_TIMEZONE, new Date(), DateUtil.YYYYMMDD_FORMAT);
VMInstanceVO vmInstance = _vmDao.findById(volume.getInstanceId());
String vmDisplayName = "detached";
if (vmInstance != null) {
vmDisplayName = vmInstance.getHostName();
}
if (snapshotName == null)
snapshotName = vmDisplayName + "_" + volume.getName() + "_" + timeString;
HypervisorType hypervisorType = HypervisorType.None;
StoragePoolVO storagePool = _storagePoolDao.findById(volume.getDataStore().getId());
if (storagePool.getScope() == ScopeType.ZONE) {
hypervisorType = storagePool.getHypervisor();
// at the time being, managed storage only supports XenServer, ESX(i), and KVM (i.e. not Hyper-V), so the VHD file type can be mapped to XenServer
if (storagePool.isManaged() && HypervisorType.Any.equals(hypervisorType) && ImageFormat.VHD.equals(volume.getFormat())) {
hypervisorType = HypervisorType.XenServer;
}
} else {
hypervisorType = volume.getHypervisorType();
}
SnapshotVO snapshotVO = new SnapshotVO(volume.getDataCenterId(), volume.getAccountId(), volume.getDomainId(), volume.getId(), volume.getDiskOfferingId(), snapshotName, (short) snapshotType.ordinal(), snapshotType.name(), volume.getSize(), volume.getMinIops(), volume.getMaxIops(), hypervisorType, locationType);
SnapshotVO snapshot = _snapshotDao.persist(snapshotVO);
if (snapshot == null) {
throw new CloudRuntimeException("Failed to create snapshot for volume: " + volume.getId());
}
_resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.snapshot);
_resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.secondary_storage, new Long(volume.getSize()));
return snapshot;
}
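The hypervisor-type resolution in allocSnapshot depends on the pool's scope: a zone-wide pool records the hypervisor itself (with a special case for managed storage that reports Any while the volume is a VHD), whereas a cluster-scoped pool defers to the volume's own hypervisor type. A compact standalone sketch of that decision, with simplified enums standing in for CloudStack's HypervisorType, ScopeType and ImageFormat:

public class SnapshotHypervisorSketch {

    // Simplified stand-ins for HypervisorType, ScopeType and ImageFormat.
    enum Hypervisor { KVM, XenServer, VMware, Any, None }
    enum Scope { ZONE, CLUSTER }
    enum Format { VHD, QCOW2, OVA }

    static Hypervisor snapshotHypervisor(Scope poolScope, Hypervisor poolHypervisor, boolean poolManaged,
                                         Hypervisor volumeHypervisor, Format volumeFormat) {
        if (poolScope == Scope.ZONE) {
            // Managed zone-wide storage may report Any; a VHD volume is then treated as
            // XenServer, mirroring the special case in allocSnapshot above.
            if (poolManaged && poolHypervisor == Hypervisor.Any && volumeFormat == Format.VHD) {
                return Hypervisor.XenServer;
            }
            return poolHypervisor;
        }
        return volumeHypervisor;
    }

    public static void main(String[] args) {
        System.out.println(snapshotHypervisor(Scope.ZONE, Hypervisor.Any, true, Hypervisor.None, Format.VHD));      // XenServer
        System.out.println(snapshotHypervisor(Scope.CLUSTER, Hypervisor.KVM, false, Hypervisor.KVM, Format.QCOW2)); // KVM
    }
}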