Use of com.cloud.model.enumeration.HypervisorType in project cosmic by MissionCriticalCloud.
The class ResourceManagerImpl, method discoverCluster.
@DB
@Override
public List<? extends Cluster> discoverCluster(final AddClusterCmd cmd) throws IllegalArgumentException, DiscoveryException, ResourceInUseException {
final ResourceChecker resourceChecker = ResourceChecker.builder().dataCenterDao(this._dcDao).accountManager(this._accountMgr).hostPodDao(this._podDao).build();
final long dcId = cmd.getZoneId();
final long podId = cmd.getPodId();
final String clusterName = cmd.getClusterName();
String url = cmd.getUrl();
final String username = cmd.getUsername();
final String password = cmd.getPassword();
if (url != null) {
url = URLDecoder.decode(url);
}
final URI uri;
final DataCenterVO zone = resourceChecker.checkIfDataCenterExists(dcId);
final Account account = CallContext.current().getCallingAccount();
resourceChecker.checkIfDataCenterIsUsable(zone, account);
final HostPodVO pod = this._podDao.findById(podId);
// Check that the pod exists in the system
if (pod == null) {
throw new InvalidParameterValueException("Can't find pod with specified podId " + podId);
}
// check if pod belongs to the zone
if (pod.getDataCenterId() != dcId) {
final InvalidParameterValueException ex = new InvalidParameterValueException("Pod with specified id doesn't belong to the zone " + dcId);
ex.addProxyObject(pod.getUuid(), "podId");
ex.addProxyObject(zone.getUuid(), "dcId");
throw ex;
}
// Verify cluster information and create a new cluster if needed
if (clusterName == null || clusterName.isEmpty()) {
throw new InvalidParameterValueException("Please specify cluster name");
}
if (cmd.getHypervisor() == null || cmd.getHypervisor().isEmpty()) {
throw new InvalidParameterValueException("Please specify a hypervisor");
}
final HypervisorType hypervisorType = HypervisorType.getType(cmd.getHypervisor());
if (hypervisorType == null) {
s_logger.error("Unable to resolve " + cmd.getHypervisor() + " to a valid supported hypervisor type");
throw new InvalidParameterValueException("Unable to resolve " + cmd.getHypervisor() + " to a supported ");
}
Cluster.ClusterType clusterType = null;
if (cmd.getClusterType() != null && !cmd.getClusterType().isEmpty()) {
clusterType = Cluster.ClusterType.valueOf(cmd.getClusterType());
}
if (clusterType == null) {
clusterType = Cluster.ClusterType.CloudManaged;
}
AllocationState allocationState = null;
if (cmd.getAllocationState() != null && !cmd.getAllocationState().isEmpty()) {
try {
allocationState = AllocationState.valueOf(cmd.getAllocationState());
} catch (final IllegalArgumentException ex) {
throw new InvalidParameterValueException("Unable to resolve Allocation State '" + cmd.getAllocationState() + "' to a supported state");
}
}
if (allocationState == null) {
allocationState = AllocationState.Enabled;
}
final Discoverer discoverer = getMatchingDiscover(hypervisorType);
if (discoverer == null) {
throw new InvalidParameterValueException("Could not find corresponding resource manager for " + cmd.getHypervisor());
}
final List<ClusterVO> result = new ArrayList<>();
ClusterVO cluster = new ClusterVO(dcId, podId, clusterName);
cluster.setHypervisorType(hypervisorType.toString());
cluster.setClusterType(clusterType);
cluster.setAllocationState(allocationState);
try {
cluster = this._clusterDao.persist(cluster);
} catch (final Exception e) {
// no longer tolerate exception during the cluster creation phase
final CloudRuntimeException ex = new CloudRuntimeException("Unable to create cluster " + clusterName + " in pod and data center with specified ids", e);
// attach the pod and zone uuids so the API error response can reference them
ex.addProxyObject(pod.getUuid(), "podId");
ex.addProxyObject(zone.getUuid(), "dcId");
throw ex;
}
result.add(cluster);
if (clusterType == Cluster.ClusterType.CloudManaged) {
final Map<String, String> details = new HashMap<>();
details.put("cpuOvercommitRatio", CapacityManager.CpuOverprovisioningFactor.value().toString());
details.put("memoryOvercommitRatio", CapacityManager.MemOverprovisioningFactor.value().toString());
this._clusterDetailsDao.persist(cluster.getId(), details);
return result;
}
// save cluster details for later cluster/host cross-checking
final Map<String, String> details = new HashMap<>();
details.put("url", url);
details.put("username", username);
details.put("password", password);
details.put("cpuOvercommitRatio", CapacityManager.CpuOverprovisioningFactor.value().toString());
details.put("memoryOvercommitRatio", CapacityManager.MemOverprovisioningFactor.value().toString());
this._clusterDetailsDao.persist(cluster.getId(), details);
boolean success = false;
try {
try {
uri = new URI(UriUtils.encodeURIComponent(url));
if (uri.getScheme() == null) {
throw new InvalidParameterValueException("uri.scheme is null " + url + ", add http:// as a prefix");
} else if (uri.getScheme().equalsIgnoreCase("http")) {
if (uri.getHost() == null || uri.getHost().equalsIgnoreCase("") || uri.getPath() == null || uri.getPath().equalsIgnoreCase("")) {
throw new InvalidParameterValueException("Your host and/or path is wrong. Make sure it's of the format http://hostname/path");
}
}
} catch (final URISyntaxException e) {
throw new InvalidParameterValueException(url + " is not a valid URI");
}
final List<HostVO> hosts = new ArrayList<>();
final Map<? extends ServerResource, Map<String, String>> resources;
resources = discoverer.find(dcId, podId, cluster.getId(), uri, username, password, null);
if (resources != null) {
for (final Map.Entry<? extends ServerResource, Map<String, String>> entry : resources.entrySet()) {
final ServerResource resource = entry.getKey();
final HostVO host = (HostVO) createHostAndAgent(resource, entry.getValue(), true, null, false);
if (host != null) {
hosts.add(host);
}
discoverer.postDiscovery(hosts, this._nodeId);
}
s_logger.info("External cluster has been successfully discovered by " + discoverer.getName());
success = true;
return result;
}
s_logger.warn("Unable to find the server resources at " + url);
throw new DiscoveryException("Unable to add the external cluster");
} finally {
if (!success) {
this._clusterDetailsDao.deleteDetails(cluster.getId());
this._clusterDao.remove(cluster.getId());
}
}
}
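Both discoverCluster and updateCluster null-check the result of HypervisorType.getType, so the resolver must swallow unknown names rather than throw. A minimal sketch of such a resolver, assuming a plain enum lookup (the actual Cosmic implementation may differ):

public static HypervisorType getType(final String hypervisor) {
    if (hypervisor == null || hypervisor.trim().isEmpty()) {
        return null; // callers treat null as "unresolvable"
    }
    for (final HypervisorType type : HypervisorType.values()) {
        if (type.name().equalsIgnoreCase(hypervisor.trim())) {
            return type;
        }
    }
    return null; // unknown hypervisor name
}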
Use of com.cloud.model.enumeration.HypervisorType in project cosmic by MissionCriticalCloud.
The class ResourceManagerImpl, method updateCluster.
@Override
@DB
public Cluster updateCluster(final Cluster clusterToUpdate, final String clusterType, final String hypervisor, final String allocationState, final String managedstate) {
final ClusterVO cluster = (ClusterVO) clusterToUpdate;
// Verify cluster information and update the cluster if needed
boolean doUpdate = false;
if (hypervisor != null && !hypervisor.isEmpty()) {
final HypervisorType hypervisorType = HypervisorType.getType(hypervisor);
if (hypervisorType == null) {
s_logger.error("Unable to resolve " + hypervisor + " to a valid supported hypervisor type");
throw new InvalidParameterValueException("Unable to resolve " + hypervisor + " to a supported type");
} else {
cluster.setHypervisorType(hypervisor);
doUpdate = true;
}
}
final Cluster.ClusterType newClusterType;
if (clusterType != null && !clusterType.isEmpty()) {
try {
newClusterType = Cluster.ClusterType.valueOf(clusterType);
} catch (final IllegalArgumentException ex) {
throw new InvalidParameterValueException("Unable to resolve " + clusterType + " to a supported type");
}
if (newClusterType == null) {
s_logger.error("Unable to resolve " + clusterType + " to a valid supported cluster type");
throw new InvalidParameterValueException("Unable to resolve " + clusterType + " to a supported type");
} else {
cluster.setClusterType(newClusterType);
doUpdate = true;
}
}
final AllocationState newAllocationState;
if (allocationState != null && !allocationState.isEmpty()) {
try {
newAllocationState = AllocationState.valueOf(allocationState);
} catch (final IllegalArgumentException ex) {
throw new InvalidParameterValueException("Unable to resolve Allocation State '" + allocationState + "' to a supported state");
}
if (newAllocationState == null) {
s_logger.error("Unable to resolve " + allocationState + " to a valid supported allocation State");
throw new InvalidParameterValueException("Unable to resolve " + allocationState + " to a supported state");
} else {
cluster.setAllocationState(newAllocationState);
doUpdate = true;
}
}
ManagedState newManagedState = null;
final ManagedState oldManagedState = cluster.getManagedState();
if (managedstate != null && !managedstate.isEmpty()) {
try {
newManagedState = ManagedState.valueOf(managedstate);
} catch (final IllegalArgumentException ex) {
throw new InvalidParameterValueException("Unable to resolve Managed State '" + managedstate + "' to a supported state");
}
if (newManagedState == null) {
s_logger.error("Unable to resolve Managed State '" + managedstate + "' to a supported state");
throw new InvalidParameterValueException("Unable to resolve Managed State '" + managedstate + "' to a supported state");
} else {
doUpdate = true;
}
}
if (doUpdate) {
this._clusterDao.update(cluster.getId(), cluster);
}
if (newManagedState != null && !newManagedState.equals(oldManagedState)) {
if (newManagedState.equals(ManagedState.Unmanaged)) {
boolean success = false;
try {
cluster.setManagedState(ManagedState.PrepareUnmanaged);
this._clusterDao.update(cluster.getId(), cluster);
List<HostVO> hosts = listAllUpAndEnabledHosts(HostType.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId());
for (final HostVO host : hosts) {
if (host.getType().equals(HostType.Routing) && !host.getStatus().equals(HostStatus.Down) && !host.getStatus().equals(HostStatus.Disconnected) && !host.getStatus().equals(HostStatus.Up) && !host.getStatus().equals(HostStatus.Alert)) {
final String msg = "host " + host.getPrivateIpAddress() + " should not be in " + host.getStatus().toString() + " status";
throw new CloudRuntimeException("PrepareUnmanaged Failed due to " + msg);
}
}
for (final HostVO host : hosts) {
if (host.getStatus().equals(HostStatus.Up)) {
umanageHost(host.getId());
}
}
final int retry = 40;
boolean lsuccess;
for (int i = 0; i < retry; i++) {
lsuccess = true;
try {
Thread.sleep(5 * 1000);
} catch (final Exception e) {
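// interrupted while sleeping; ignore and keep polling until the retry budget is spent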
}
hosts = listAllUpAndEnabledHosts(HostType.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId());
for (final HostVO host : hosts) {
if (!host.getStatus().equals(HostStatus.Down) && !host.getStatus().equals(HostStatus.Disconnected) && !host.getStatus().equals(HostStatus.Alert)) {
lsuccess = false;
break;
}
}
if (lsuccess) {
success = true;
break;
}
}
if (!success) {
throw new CloudRuntimeException("PrepareUnmanaged failed: some hosts are still in Up status after the retry period, please try again later");
}
} finally {
cluster.setManagedState(success ? ManagedState.Unmanaged : ManagedState.PrepareUnmanagedError);
this._clusterDao.update(cluster.getId(), cluster);
}
} else if (newManagedState.equals(ManagedState.Managed)) {
cluster.setManagedState(ManagedState.Managed);
this._clusterDao.update(cluster.getId(), cluster);
}
}
return cluster;
}
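The unmanage path above polls host status in 5-second steps for up to 40 retries (about 200 seconds) until every Routing host reports Down, Disconnected, or Alert. The same poll-until-quiesced loop in condensed form, with a hypothetical allHostsQuiesced predicate standing in for the inline status scan:

private boolean waitForClusterToQuiesce(final long clusterId, final Long podId, final long zoneId) {
    final int retry = 40;
    for (int i = 0; i < retry; i++) {
        try {
            Thread.sleep(5 * 1000);
        } catch (final InterruptedException e) {
            Thread.currentThread().interrupt();
            return false;
        }
        // hypothetical helper wrapping the Down/Disconnected/Alert scan above
        if (allHostsQuiesced(clusterId, podId, zoneId)) {
            return true;
        }
    }
    return false;
}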
Use of com.cloud.model.enumeration.HypervisorType in project cosmic by MissionCriticalCloud.
The class XenserverSnapshotStrategy, method backupSnapshot.
@Override
public SnapshotInfo backupSnapshot(final SnapshotInfo snapshot) {
final SnapshotInfo parentSnapshot = snapshot.getParent();
if (parentSnapshot != null && snapshot.getPath().equalsIgnoreCase(parentSnapshot.getPath())) {
s_logger.debug("backup an empty snapshot");
// don't need to backup this snapshot
final SnapshotDataStoreVO parentSnapshotOnBackupStore = snapshotStoreDao.findBySnapshot(parentSnapshot.getId(), DataStoreRole.Image);
if (parentSnapshotOnBackupStore != null && parentSnapshotOnBackupStore.getState() == State.Ready) {
final DataStore store = dataStoreMgr.getDataStore(parentSnapshotOnBackupStore.getDataStoreId(), parentSnapshotOnBackupStore.getRole());
final SnapshotInfo snapshotOnImageStore = (SnapshotInfo) store.create(snapshot);
snapshotOnImageStore.processEvent(Event.CreateOnlyRequested);
final SnapshotObjectTO snapTO = new SnapshotObjectTO();
snapTO.setPath(parentSnapshotOnBackupStore.getInstallPath());
final CreateObjectAnswer createSnapshotAnswer = new CreateObjectAnswer(snapTO);
snapshotOnImageStore.processEvent(Event.OperationSuccessed, createSnapshotAnswer);
final SnapshotObject snapObj = (SnapshotObject) snapshot;
try {
snapObj.processEvent(Snapshot.Event.OperationNotPerformed);
} catch (final NoTransitionException e) {
s_logger.debug("Failed to change state: " + snapshot.getId() + ": " + e.toString());
throw new CloudRuntimeException(e.toString());
}
return snapshotDataFactory.getSnapshot(snapObj.getId(), store);
} else {
s_logger.debug("parent snapshot hasn't been backed up yet");
}
}
// determine full snapshot backup or not
boolean fullBackup = true;
SnapshotDataStoreVO parentSnapshotOnBackupStore = snapshotStoreDao.findLatestSnapshotForVolume(snapshot.getVolumeId(), DataStoreRole.Image);
final SnapshotDataStoreVO parentSnapshotOnPrimaryStore = snapshotStoreDao.findLatestSnapshotForVolume(snapshot.getVolumeId(), DataStoreRole.Primary);
final HypervisorType hypervisorType = snapshot.getBaseVolume().getHypervisorType();
if (parentSnapshotOnPrimaryStore != null && parentSnapshotOnBackupStore != null && hypervisorType == HypervisorType.XenServer) {
// CS does incremental backup
// only for XenServer
// In case of volume migration from one pool to another, CS should take a full snapshot to avoid any issues with the delta chain;
// to check whether this is a migrated volume, compare the current pool id of the volume with the store_id of the oldest snapshot on primary for this volume.
// Why the oldest? Because at this point CS has two snapshot-on-primary entries for the same volume, one with the old pool_id and the other with the
// current pool id. So verify, and if the volume is found to be migrated, delete the snapshot entry with the previous pool store_id.
final SnapshotDataStoreVO oldestSnapshotOnPrimary = snapshotStoreDao.findOldestSnapshotForVolume(snapshot.getVolumeId(), DataStoreRole.Primary);
final VolumeVO volume = volumeDao.findById(snapshot.getVolumeId());
if (oldestSnapshotOnPrimary != null) {
if (oldestSnapshotOnPrimary.getDataStoreId() == volume.getPoolId()) {
final int deltaSnap = NumbersUtil.parseInt(configDao.getValue("snapshot.delta.max"), SnapshotManager.DELTAMAX);
int i;
for (i = 1; i < deltaSnap; i++) {
final Long prevBackupId = parentSnapshotOnBackupStore.getParentSnapshotId();
if (prevBackupId == 0) {
break;
}
parentSnapshotOnBackupStore = snapshotStoreDao.findBySnapshot(prevBackupId, DataStoreRole.Image);
if (parentSnapshotOnBackupStore == null) {
break;
}
}
if (i >= deltaSnap) {
fullBackup = true;
} else {
fullBackup = false;
}
} else {
// if there is a snapshot entry for the previous pool (primary storage) of the migrated volume, delete it because CS created another snapshot entry for the current pool
snapshotStoreDao.remove(oldestSnapshotOnPrimary.getId());
}
}
}
snapshot.addPayload(fullBackup);
return snapshotSvr.backupSnapshot(snapshot);
}
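The loop above decides between incremental and full backup by walking the parent chain on the image store until it hits the chain head or the snapshot.delta.max limit. The same chain-length check in isolation, sketched over a hypothetical parentOf lookup:

// Returns true when the backup chain has already reached deltaMax,
// i.e. the next backup should be a full one.
static boolean deltaChainFull(final long newestBackupId, final int deltaMax, final java.util.function.LongFunction<Long> parentOf) {
    long current = newestBackupId;
    for (int i = 1; i < deltaMax; i++) {
        final Long parent = parentOf.apply(current); // null or 0 at the chain head
        if (parent == null || parent == 0) {
            return false; // chain is shorter than the limit: incremental is fine
        }
        current = parent;
    }
    return true; // chain is full: force a full backup
}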
Use of com.cloud.model.enumeration.HypervisorType in project cosmic by MissionCriticalCloud.
The class VolumeApiServiceImpl, method resizeVolume.
@Override
@DB
@ActionEvent(eventType = EventTypes.EVENT_VOLUME_RESIZE, eventDescription = "resizing volume", async = true)
public VolumeVO resizeVolume(final ResizeVolumeCmd cmd) throws ResourceAllocationException {
Long newSize;
Long newMinIops;
Long newMaxIops;
final boolean shrinkOk = cmd.getShrinkOk();
final VolumeVO volume = this._volsDao.findById(cmd.getEntityId());
if (volume == null) {
throw new InvalidParameterValueException("No such volume");
}
/* Does the caller have authority to act on this volume? */
this._accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, volume);
if (volume.getInstanceId() != null) {
// Check that Vm to which this volume is attached does not have VM Snapshots
if (this._vmSnapshotDao.findByVm(volume.getInstanceId()).size() > 0) {
throw new InvalidParameterValueException("Volume cannot be resized which is attached to VM with VM Snapshots");
}
}
final DiskOfferingVO diskOffering = this._diskOfferingDao.findById(volume.getDiskOfferingId());
DiskOfferingVO newDiskOffering = null;
if (cmd.getNewDiskOfferingId() != null && volume.getDiskOfferingId() != cmd.getNewDiskOfferingId()) {
newDiskOffering = this._diskOfferingDao.findById(cmd.getNewDiskOfferingId());
}
if (newDiskOffering != null && !newDiskOffering.isCustomized()) {
throw new InvalidParameterValueException("The disk offering for volume " + volume.getName() + " can only be changed to an offering that supports a custom disk size.");
}
if (diskOffering.isCustomized() && newDiskOffering != null && !newDiskOffering.isCustomized()) {
throw new InvalidParameterValueException("Volume " + volume.getName() + " has a custom size disk offering. Cannot change the disk offering." + " Please change the size instead");
}
final HypervisorType hypervisorType = this._volsDao.getHypervisorType(volume.getId());
if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.XenServer && hypervisorType != HypervisorType.Any && hypervisorType != HypervisorType.None) {
throw new InvalidParameterValueException("CloudStack currently supports volume resize only on KVM, or XenServer.");
}
if (volume.getState() != Volume.State.Ready && volume.getState() != Volume.State.Allocated) {
throw new InvalidParameterValueException("Volume should be in ready or allocated state before attempting a resize. Volume " + volume.getUuid() + " is in state " + volume.getState() + ".");
}
// if we are to use the existing disk offering
if (newDiskOffering == null) {
newSize = cmd.getSize();
// if the caller is looking to change the size of the volume
if (newSize != null) {
if (!diskOffering.isCustomized() && !volume.getVolumeType().equals(VolumeType.ROOT)) {
throw new InvalidParameterValueException("To change a volume's size without providing a new disk offering, its current disk offering must be " + "customizable or it must be a root volume (if providing a disk offering, make sure it is different from the current disk offering).");
}
// convert from GiB to bytes
newSize = newSize << 30;
} else {
// no parameter provided; just use the original size of the volume
newSize = volume.getSize();
}
newMinIops = cmd.getMinIops();
if (newMinIops != null) {
if (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops()) {
throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Min IOPS' parameter.");
}
} else {
// no parameter provided; just use the original min IOPS of the volume
newMinIops = volume.getMinIops();
}
newMaxIops = cmd.getMaxIops();
if (newMaxIops != null) {
if (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops()) {
throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Max IOPS' parameter.");
}
} else {
// no parameter provided; just use the original max IOPS of the volume
newMaxIops = volume.getMaxIops();
}
validateIops(newMinIops, newMaxIops);
} else {
if (newDiskOffering.getRemoved() != null) {
throw new InvalidParameterValueException("Requested disk offering has been removed.");
}
if (!DiskOfferingVO.Type.Disk.equals(newDiskOffering.getType())) {
throw new InvalidParameterValueException("Requested disk offering type is invalid.");
}
if (diskOffering.getTags() != null) {
if (!StringUtils.areTagsEqual(diskOffering.getTags(), newDiskOffering.getTags())) {
throw new InvalidParameterValueException("The tags on the new and old disk offerings must match.");
}
} else if (newDiskOffering.getTags() != null) {
throw new InvalidParameterValueException("There are no tags on the current disk offering. The new disk offering needs to have no tags, as well.");
}
if (!areIntegersEqual(diskOffering.getHypervisorSnapshotReserve(), newDiskOffering.getHypervisorSnapshotReserve())) {
throw new InvalidParameterValueException("The hypervisor snapshot reverse on the new and old disk offerings must be equal.");
}
if (newDiskOffering.getDomainId() != null) {
// not a public offering; check access
this._configMgr.checkDiskOfferingAccess(CallContext.current().getCallingAccount(), newDiskOffering);
}
if (newDiskOffering.isCustomized()) {
newSize = cmd.getSize();
if (newSize == null) {
throw new InvalidParameterValueException("The new disk offering requires that a size be specified.");
}
// convert from GiB to bytes
newSize = newSize << 30;
} else {
newSize = newDiskOffering.getDiskSize();
}
if (!volume.getSize().equals(newSize) && !volume.getVolumeType().equals(VolumeType.DATADISK)) {
throw new InvalidParameterValueException("Only data volumes can be resized via a new disk offering.");
}
if (newDiskOffering.isCustomizedIops() != null && newDiskOffering.isCustomizedIops()) {
newMinIops = cmd.getMinIops() != null ? cmd.getMinIops() : volume.getMinIops();
newMaxIops = cmd.getMaxIops() != null ? cmd.getMaxIops() : volume.getMaxIops();
validateIops(newMinIops, newMaxIops);
} else {
newMinIops = newDiskOffering.getMinIops();
newMaxIops = newDiskOffering.getMaxIops();
}
}
final long currentSize = volume.getSize();
// if the caller is looking to change the size of the volume
if (currentSize != newSize) {
if (!validateVolumeSizeRange(newSize)) {
throw new InvalidParameterValueException("Requested size out of range");
}
/*
* Let's make certain they (think they) know what they're doing if they
* want to shrink by forcing them to provide the shrinkok parameter.
* This will be checked again at the hypervisor level where we can see
* the actual disk size.
*/
if (currentSize > newSize && !shrinkOk) {
throw new InvalidParameterValueException("Going from existing size of " + currentSize + " to size of " + newSize + " would shrink the volume." + "Need to sign off by supplying the shrinkok parameter with value of true.");
}
if (newSize > currentSize) {
/* Check resource limit for this account on primary storage resource */
this._resourceLimitMgr.checkResourceLimit(this._accountMgr.getAccount(volume.getAccountId()), ResourceType.primary_storage, volume.isDisplayVolume(), newSize - currentSize);
}
}
/* If this volume has never been beyond allocated state, short circuit everything and simply update the database. */
if (volume.getState() == Volume.State.Allocated) {
s_logger.debug("Volume is in the allocated state, but has never been created. Simply updating database with new size and IOPS.");
volume.setSize(newSize);
volume.setMinIops(newMinIops);
volume.setMaxIops(newMaxIops);
if (newDiskOffering != null) {
volume.setDiskOfferingId(cmd.getNewDiskOfferingId());
}
this._volsDao.update(volume.getId(), volume);
return volume;
}
final UserVmVO userVm = this._userVmDao.findById(volume.getInstanceId());
if (userVm != null) {
// serialize VM operation
final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
final VmWorkJobVO placeHolder;
placeHolder = createPlaceHolderWork(userVm.getId());
try {
return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
} finally {
this._workJobDao.expunge(placeHolder.getId());
}
} else {
final Outcome<Volume> outcome = resizeVolumeThroughJobQueue(userVm.getId(), volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
try {
outcome.get();
} catch (final InterruptedException e) {
throw new RuntimeException("Operation was interrupted", e);
} catch (final java.util.concurrent.ExecutionException e) {
throw new RuntimeException("Execution exception", e);
}
final Object jobResult = this._jobMgr.unmarshallResultObject(outcome.getJob());
if (jobResult != null) {
if (jobResult instanceof ConcurrentOperationException) {
throw (ConcurrentOperationException) jobResult;
} else if (jobResult instanceof ResourceAllocationException) {
throw (ResourceAllocationException) jobResult;
} else if (jobResult instanceof RuntimeException) {
throw (RuntimeException) jobResult;
} else if (jobResult instanceof Throwable) {
throw new RuntimeException("Unexpected exception", (Throwable) jobResult);
} else if (jobResult instanceof Long) {
return this._volsDao.findById((Long) jobResult);
}
}
return volume;
}
}
return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
}
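For reference, the newSize << 30 conversion used in both branches above turns the user-supplied GiB count into bytes, since 1 GiB = 2^30 bytes:

final long sizeGiB = 8L;
final long sizeBytes = sizeGiB << 30; // 8 * 2^30 = 8,589,934,592 bytes
assert sizeBytes == 8L * 1024 * 1024 * 1024;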
Use of com.cloud.model.enumeration.HypervisorType in project cosmic by MissionCriticalCloud.
The class VolumeApiServiceImpl, method attachVolumeToVM.
public Volume attachVolumeToVM(final Long vmId, final Long volumeId, final Long deviceId) {
final Account caller = CallContext.current().getCallingAccount();
// Check that the volume ID is valid
final VolumeInfo volumeToAttach = this.volFactory.getVolume(volumeId);
// Check that the volume is a data volume
if (volumeToAttach == null || !(volumeToAttach.getVolumeType() == VolumeType.DATADISK || volumeToAttach.getVolumeType() == VolumeType.ROOT)) {
throw new InvalidParameterValueException("Please specify a volume with the valid type: " + VolumeType.ROOT.toString() + " or " + VolumeType.DATADISK.toString());
}
// Check that the volume is not currently attached to any VM
if (volumeToAttach.getInstanceId() != null) {
throw new InvalidParameterValueException("Please specify a volume that is not attached to any VM.");
}
// Check that the volume is not destroyed
if (volumeToAttach.getState() == Volume.State.Destroy) {
throw new InvalidParameterValueException("Please specify a volume that is not destroyed.");
}
// Check that the virtual machine ID is valid and it's a user vm
final UserVmVO vm = this._userVmDao.findById(vmId);
if (vm == null || vm.getType() != VirtualMachineType.User) {
throw new InvalidParameterValueException("Please specify a valid User VM.");
}
// Check that the VM is in the correct state
if (vm.getState() != State.Running && vm.getState() != State.Stopped) {
throw new InvalidParameterValueException("Please specify a VM that is either running or stopped.");
}
// Check that the VM and the volume are in the same zone
if (vm.getDataCenterId() != volumeToAttach.getDataCenterId()) {
throw new InvalidParameterValueException("Please specify a VM that is in the same zone as the volume.");
}
// Check that the device ID is valid
if (deviceId != null) {
// validate ROOT volume type
if (deviceId.longValue() == 0) {
validateRootVolumeDetachAttach(this._volsDao.findById(volumeToAttach.getId()), vm);
// vm shouldn't have any volume with deviceId 0
if (!this._volsDao.findByInstanceAndDeviceId(vm.getId(), 0).isEmpty()) {
throw new InvalidParameterValueException("Vm already has root volume attached to it");
}
// volume can't be in Uploaded state
if (volumeToAttach.getState() == Volume.State.Uploaded) {
throw new InvalidParameterValueException("No support for Root volume attach in state " + Volume.State.Uploaded);
}
}
}
// Check that the number of data volumes attached to the VM is less than the number supported by the hypervisor
if (deviceId == null || deviceId.longValue() != 0) {
final List<VolumeVO> existingDataVolumes = this._volsDao.findByInstanceAndType(vmId, VolumeType.DATADISK);
final int maxDataVolumesSupported = getMaxDataVolumesSupported(vm);
if (existingDataVolumes.size() >= maxDataVolumesSupported) {
throw new InvalidParameterValueException("The specified VM already has the maximum number of data disks (" + maxDataVolumesSupported + "). Please specify another" + " VM.");
}
}
// if target VM has associated VM snapshots
final List<VMSnapshotVO> vmSnapshots = this._vmSnapshotDao.findByVm(vmId);
if (vmSnapshots.size() > 0) {
throw new InvalidParameterValueException("Unable to attach volume, please specify a VM that does not have VM snapshots");
}
// permission check
this._accountMgr.checkAccess(caller, null, true, volumeToAttach, vm);
if (!(Volume.State.Allocated.equals(volumeToAttach.getState()) || Volume.State.Ready.equals(volumeToAttach.getState()) || Volume.State.Uploaded.equals(volumeToAttach.getState()))) {
throw new InvalidParameterValueException("Volume state must be in Allocated, Ready or in Uploaded state");
}
final Account owner = this._accountDao.findById(volumeToAttach.getAccountId());
if (!(volumeToAttach.getState() == Volume.State.Allocated || volumeToAttach.getState() == Volume.State.Ready)) {
try {
this._resourceLimitMgr.checkResourceLimit(owner, ResourceType.primary_storage, volumeToAttach.getSize());
} catch (final ResourceAllocationException e) {
s_logger.error("primary storage resource limit check failed", e);
throw new InvalidParameterValueException(e.getMessage());
}
}
final HypervisorType rootDiskHyperType = vm.getHypervisorType();
final HypervisorType volumeToAttachHyperType = this._volsDao.getHypervisorType(volumeToAttach.getId());
final StoragePoolVO volumeToAttachStoragePool = this._storagePoolDao.findById(volumeToAttach.getPoolId());
// only perform this check if the volume's storage pool is not null and not managed
if (volumeToAttachStoragePool != null && !volumeToAttachStoragePool.isManaged()) {
if (volumeToAttachHyperType != HypervisorType.None && rootDiskHyperType != volumeToAttachHyperType) {
throw new InvalidParameterValueException("Can't attach a volume created by: " + volumeToAttachHyperType + " to a " + rootDiskHyperType + " vm");
}
}
final AsyncJobExecutionContext asyncExecutionContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (asyncExecutionContext != null) {
final AsyncJob job = asyncExecutionContext.getJob();
if (s_logger.isInfoEnabled()) {
s_logger.info("Trying to attaching volume " + volumeId + " to vm instance:" + vm.getId() + ", update async job-" + job.getId() + " progress status");
}
this._jobMgr.updateAsyncJobAttachment(job.getId(), "Volume", volumeId);
}
final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance
final VmWorkJobVO placeHolder;
placeHolder = createPlaceHolderWork(vmId);
try {
return orchestrateAttachVolumeToVM(vmId, volumeId, deviceId, volumeToAttach.getDiskController());
} finally {
this._workJobDao.expunge(placeHolder.getId());
}
} else {
final Outcome<Volume> outcome = attachVolumeToVmThroughJobQueue(vmId, volumeId, deviceId, volumeToAttach.getDiskController());
Volume vol = null;
try {
outcome.get();
} catch (final InterruptedException e) {
throw new RuntimeException("Operation is interrupted", e);
} catch (final java.util.concurrent.ExecutionException e) {
throw new RuntimeException("Execution excetion", e);
}
final Object jobResult = this._jobMgr.unmarshallResultObject(outcome.getJob());
if (jobResult != null) {
if (jobResult instanceof ConcurrentOperationException) {
throw (ConcurrentOperationException) jobResult;
} else if (jobResult instanceof InvalidParameterValueException) {
throw (InvalidParameterValueException) jobResult;
} else if (jobResult instanceof RuntimeException) {
throw (RuntimeException) jobResult;
} else if (jobResult instanceof Throwable) {
throw new RuntimeException("Unexpected exception", (Throwable) jobResult);
} else if (jobResult instanceof Long) {
vol = this._volsDao.findById((Long) jobResult);
}
}
return vol;
}
}
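resizeVolume and attachVolumeToVM close with the same dispatch shape: run inline (under a placeholder work job) when already inside the VM work job dispatcher, otherwise queue the work and unmarshal the job outcome. The shared result handling could be factored out along these lines (hypothetical helper, simplified to the unchecked cases; the checked rethrows would stay at the call sites):

// Rethrow a failure carried in the job result, or load the volume it identifies.
private Volume unwrapVolumeJobResult(final Object jobResult) {
    if (jobResult instanceof RuntimeException) {
        throw (RuntimeException) jobResult;
    }
    if (jobResult instanceof Throwable) {
        throw new RuntimeException("Unexpected exception", (Throwable) jobResult);
    }
    if (jobResult instanceof Long) {
        return this._volsDao.findById((Long) jobResult);
    }
    return null;
}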