Use of com.cloud.exception.StorageAccessException in project cloudstack by apache.
From the class VolumeOrchestrator, method prepare:
@Override
public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException, StorageAccessException {
if (dest == null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("DeployDestination cannot be null, cannot prepare Volumes for the vm: " + vm);
}
throw new CloudRuntimeException("Unable to prepare Volume for vm because DeployDestination is null, vm:" + vm);
}
// don't allow starting a VM that doesn't have a ROOT volume
if (_volsDao.findByInstanceAndType(vm.getId(), Volume.Type.ROOT).isEmpty()) {
throw new CloudRuntimeException("Unable to prepare volumes for vm as ROOT volume is missing");
}
List<VolumeVO> vols = _volsDao.findUsableVolumesForInstance(vm.getId());
List<VolumeTask> tasks = getTasks(vols, dest.getStorageForDisks(), vm);
Volume vol = null;
StoragePool pool;
for (VolumeTask task : tasks) {
if (task.type == VolumeTaskType.NOP) {
vol = task.volume;
pool = (StoragePool) dataStoreMgr.getDataStore(task.pool.getId(), DataStoreRole.Primary);
// For zone-wide managed storage, the VM may be started in another cluster. In that case, make sure that the volume is in the right access group.
if (pool.isManaged()) {
Host lastHost = _hostDao.findById(vm.getVirtualMachine().getLastHostId());
Host host = _hostDao.findById(vm.getVirtualMachine().getHostId());
long lastClusterId = lastHost == null || lastHost.getClusterId() == null ? -1 : lastHost.getClusterId();
long clusterId = host == null || host.getClusterId() == null ? -1 : host.getClusterId();
if (lastClusterId != clusterId) {
if (lastHost != null) {
storageMgr.removeStoragePoolFromCluster(lastHost.getId(), vol.get_iScsiName(), pool);
DataStore storagePool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
volService.revokeAccess(volFactory.getVolume(vol.getId()), lastHost, storagePool);
}
try {
volService.grantAccess(volFactory.getVolume(vol.getId()), host, (DataStore) pool);
} catch (Exception e) {
throw new StorageAccessException("Unable to grant access to volume: " + vol.getId() + " on host: " + host.getId());
}
} else {
// This might impact other managed storages, grant access for PowerFlex storage pool only
if (pool.getPoolType() == Storage.StoragePoolType.PowerFlex) {
try {
volService.grantAccess(volFactory.getVolume(vol.getId()), host, (DataStore) pool);
} catch (Exception e) {
throw new StorageAccessException("Unable to grant access to volume: " + vol.getId() + " on host: " + host.getId());
}
}
}
}
} else if (task.type == VolumeTaskType.MIGRATE) {
pool = (StoragePool) dataStoreMgr.getDataStore(task.pool.getId(), DataStoreRole.Primary);
vol = migrateVolume(task.volume, pool);
} else if (task.type == VolumeTaskType.RECREATE) {
Pair<VolumeVO, DataStore> result = recreateVolume(task.volume, vm, dest);
pool = (StoragePool) dataStoreMgr.getDataStore(result.second().getId(), DataStoreRole.Primary);
vol = result.first();
}
VolumeInfo volumeInfo = volFactory.getVolume(vol.getId());
DataTO volTO = volumeInfo.getTO();
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId());
DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
disk.setDetails(getDetails(volumeInfo, dataStore));
vm.addDisk(disk);
// If hypervisor is vSphere, check for clone type setting.
if (vm.getHypervisorType().equals(HypervisorType.VMware)) {
// retrieve clone flag.
UserVmCloneType cloneType = UserVmCloneType.linked;
Boolean value = CapacityManager.VmwareCreateCloneFull.valueIn(vol.getPoolId());
if (value != null && value) {
cloneType = UserVmCloneType.full;
}
UserVmCloneSettingVO cloneSettingVO = _vmCloneSettingDao.findByVmId(vm.getId());
if (cloneSettingVO != null) {
if (!cloneSettingVO.getCloneType().equals(cloneType.toString())) {
cloneSettingVO.setCloneType(cloneType.toString());
_vmCloneSettingDao.update(cloneSettingVO.getId(), cloneSettingVO);
}
} else {
UserVmCloneSettingVO vmCloneSettingVO = new UserVmCloneSettingVO(vm.getId(), cloneType.toString());
_vmCloneSettingDao.persist(vmCloneSettingVO);
}
}
}
}
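Throughout prepare, a failed volService.grantAccess call is wrapped in the checked StorageAccessException so that callers (for example orchestrateStart, shown further down) can treat host-access failures differently from other storage errors. A minimal sketch of that wrapping pattern, using an illustrative helper name and assuming the same collaborators (volService, volFactory) as the method above:

    // Sketch only: grantOrFail is an illustrative helper, not CloudStack code.
    private void grantOrFail(Volume vol, Host host, StoragePool pool) throws StorageAccessException {
        try {
            // make the managed volume visible to the target host
            volService.grantAccess(volFactory.getVolume(vol.getId()), host, (DataStore) pool);
        } catch (Exception e) {
            // wrap any failure in the checked exception the orchestration layer expects
            throw new StorageAccessException("Unable to grant access to volume: " + vol.getId() + " on host: " + host.getId());
        }
    }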
Use of com.cloud.exception.StorageAccessException in project cloudstack by apache.
From the class VolumeOrchestrator, method recreateVolume:
private Pair<VolumeVO, DataStore> recreateVolume(VolumeVO vol, VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, StorageAccessException {
VolumeVO newVol;
boolean recreate = RecreatableSystemVmEnabled.value();
DataStore destPool = null;
if (recreate && (dest.getStorageForDisks() == null || dest.getStorageForDisks().get(vol) == null)) {
destPool = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
s_logger.debug("existing pool: " + destPool.getId());
} else {
StoragePool pool = dest.getStorageForDisks().get(vol);
destPool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
}
if (vol.getState() == Volume.State.Allocated || vol.getState() == Volume.State.Creating) {
newVol = vol;
} else {
newVol = switchVolume(vol, vm);
// switchVolume allocates a new volume record, so point dest's storage-for-disks map at the new volume
if (dest.getStorageForDisks() != null && dest.getStorageForDisks().containsKey(vol)) {
StoragePool poolWithOldVol = dest.getStorageForDisks().get(vol);
dest.getStorageForDisks().put(newVol, poolWithOldVol);
dest.getStorageForDisks().remove(vol);
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Created new volume " + newVol + " for old volume " + vol);
}
}
VolumeInfo volume = volFactory.getVolume(newVol.getId(), destPool);
Long templateId = newVol.getTemplateId();
for (int i = 0; i < 2; i++) {
// retry one more time in case a template reload is required (VMware case)
AsyncCallFuture<VolumeApiResult> future;
if (templateId == null) {
DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
HypervisorType hyperType = vm.getVirtualMachine().getHypervisorType();
// update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage)
volService.updateHypervisorSnapshotReserveForVolume(diskOffering, volume.getId(), hyperType);
volume = volFactory.getVolume(newVol.getId(), destPool);
future = volService.createVolumeAsync(volume, destPool);
} else {
TemplateInfo templ = tmplFactory.getReadyTemplateOnImageStore(templateId, dest.getDataCenter().getId());
PrimaryDataStore primaryDataStore = (PrimaryDataStore) destPool;
if (templ == null) {
if (tmplFactory.isTemplateMarkedForDirectDownload(templateId)) {
// Template is marked for direct download bypassing Secondary Storage
if (!primaryDataStore.isManaged()) {
templ = tmplFactory.getReadyBypassedTemplateOnPrimaryStore(templateId, destPool.getId(), dest.getHost().getId());
} else {
s_logger.debug("Direct download template: " + templateId + " on host: " + dest.getHost().getId() + " and copy to the managed storage pool: " + destPool.getId());
templ = volService.createManagedStorageTemplate(templateId, destPool.getId(), dest.getHost().getId());
}
if (templ == null) {
s_logger.debug("Failed to spool direct download template: " + templateId + " for data center " + dest.getDataCenter().getId());
throw new CloudRuntimeException("Failed to spool direct download template: " + templateId + " for data center " + dest.getDataCenter().getId());
}
} else {
s_logger.debug("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId());
throw new CloudRuntimeException("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId());
}
}
if (primaryDataStore.isManaged()) {
DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
HypervisorType hyperType = vm.getVirtualMachine().getHypervisorType();
// update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage)
volService.updateHypervisorSnapshotReserveForVolume(diskOffering, volume.getId(), hyperType);
long hostId = vm.getVirtualMachine().getHostId();
future = volService.createManagedStorageVolumeFromTemplateAsync(volume, destPool.getId(), templ, hostId);
} else {
future = volService.createVolumeFromTemplateAsync(volume, destPool.getId(), templ);
}
}
VolumeApiResult result;
try {
result = future.get();
if (result.isFailed()) {
if (result.getResult().contains(REQUEST_TEMPLATE_RELOAD) && (i == 0)) {
s_logger.debug("Retry template re-deploy for vmware");
continue;
} else {
s_logger.debug("Unable to create " + newVol + ":" + result.getResult());
throw new StorageUnavailableException("Unable to create " + newVol + ":" + result.getResult(), destPool.getId());
}
}
StoragePoolVO storagePool = _storagePoolDao.findById(destPool.getId());
if (storagePool.isManaged()) {
long hostId = vm.getVirtualMachine().getHostId();
Host host = _hostDao.findById(hostId);
try {
volService.grantAccess(volFactory.getVolume(newVol.getId()), host, destPool);
} catch (Exception e) {
throw new StorageAccessException("Unable to grant access to volume: " + newVol.getId() + " on host: " + host.getId());
}
}
newVol = _volsDao.findById(newVol.getId());
// break out of template-redeploy retry loop
break;
} catch (StorageAccessException e) {
throw e;
} catch (InterruptedException | ExecutionException e) {
s_logger.error("Unable to create " + newVol, e);
throw new StorageUnavailableException("Unable to create " + newVol + ":" + e.toString(), destPool.getId());
}
}
return new Pair<VolumeVO, DataStore>(newVol, destPool);
}
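The loop above attempts volume creation at most twice: if the first attempt fails with a result containing REQUEST_TEMPLATE_RELOAD (the VMware template-reload case), it retries once; any other failure surfaces as a StorageUnavailableException. A condensed sketch of that control flow, where createAttempt() and needsTemplateReload() are illustrative stand-ins rather than CloudStack APIs:

    for (int i = 0; i < 2; i++) {
        VolumeApiResult result = createAttempt();              // hypothetical: one create-volume attempt
        if (result.isFailed()) {
            if (needsTemplateReload(result) && i == 0) {
                continue;                                      // retry once after the template is re-spooled
            }
            throw new StorageUnavailableException("Unable to create volume: " + result.getResult(), destPool.getId());
        }
        break;                                                 // success: leave the retry loop
    }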
Use of com.cloud.exception.StorageAccessException in project cloudstack by apache.
From the class VirtualMachineManagerImpl, method orchestrateStart:
@Override
public void orchestrateStart(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params, final DeploymentPlan planToDeploy, final DeploymentPlanner planner) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
final CallContext cctxt = CallContext.current();
final Account account = cctxt.getCallingAccount();
final User caller = cctxt.getCallingUser();
VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
final VirtualMachineGuru vmGuru = getVmGuru(vm);
final Ternary<VMInstanceVO, ReservationContext, ItWorkVO> start = changeToStartState(vmGuru, vm, caller, account);
if (start == null) {
return;
}
vm = start.first();
final ReservationContext ctx = start.second();
ItWorkVO work = start.third();
VMInstanceVO startedVm = null;
final ServiceOfferingVO offering = _offeringDao.findById(vm.getId(), vm.getServiceOfferingId());
final VirtualMachineTemplate template = _entityMgr.findByIdIncludingRemoved(VirtualMachineTemplate.class, vm.getTemplateId());
DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), vm.getPodIdToDeployIn(), null, null, null, null, ctx);
if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("advanceStart: DeploymentPlan is provided, using dcId:" + planToDeploy.getDataCenterId() + ", podId: " + planToDeploy.getPodId() + ", clusterId: " + planToDeploy.getClusterId() + ", hostId: " + planToDeploy.getHostId() + ", poolId: " + planToDeploy.getPoolId());
}
plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), planToDeploy.getPoolId(), planToDeploy.getPhysicalNetworkId(), ctx);
}
final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
// check resource count if ResourceCountRunningVMsonly.value() = true
final Account owner = _entityMgr.findById(Account.class, vm.getAccountId());
if (VirtualMachine.Type.User.equals(vm.type) && ResourceCountRunningVMsonly.value()) {
resourceCountIncrement(owner.getAccountId(), new Long(offering.getCpu()), new Long(offering.getRamSize()));
}
boolean canRetry = true;
ExcludeList avoids = null;
try {
final Journal journal = start.second().getJournal();
if (planToDeploy != null) {
avoids = planToDeploy.getAvoids();
}
if (avoids == null) {
avoids = new ExcludeList();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid());
}
boolean planChangedByVolume = false;
boolean reuseVolume = true;
final DataCenterDeployment originalPlan = plan;
int retry = StartRetry.value();
while (retry-- != 0) {
s_logger.debug("VM start attempt #" + (StartRetry.value() - retry));
if (reuseVolume) {
final List<VolumeVO> vols = _volsDao.findReadyRootVolumesByInstance(vm.getId());
for (final VolumeVO vol : vols) {
final Long volTemplateId = vol.getTemplateId();
if (volTemplateId != null && volTemplateId != template.getId()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug(vol + " of " + vm + " is READY, but template ids don't match, let the planner reassign a new pool");
}
continue;
}
final StoragePool pool = (StoragePool) dataStoreMgr.getPrimaryDataStore(vol.getPoolId());
if (!pool.isInMaintenance()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Root volume is ready, need to place VM in volume's cluster");
}
final long rootVolDcId = pool.getDataCenterId();
final Long rootVolPodId = pool.getPodId();
final Long rootVolClusterId = pool.getClusterId();
if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
final Long clusterIdSpecified = planToDeploy.getClusterId();
if (clusterIdSpecified != null && rootVolClusterId != null) {
if (!rootVolClusterId.equals(clusterIdSpecified)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Cannot satisfy the deployment plan passed in since the ready Root volume is in different cluster. volume's cluster: " + rootVolClusterId + ", cluster specified: " + clusterIdSpecified);
}
throw new ResourceUnavailableException("Root volume is ready in different cluster, Deployment plan provided cannot be satisfied, unable to create a deployment for " + vm, Cluster.class, clusterIdSpecified);
}
}
plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), vol.getPoolId(), null, ctx);
} else {
plan = new DataCenterDeployment(rootVolDcId, rootVolPodId, rootVolClusterId, null, vol.getPoolId(), null, ctx);
if (s_logger.isDebugEnabled()) {
s_logger.debug(vol + " is READY, changing deployment plan to use this pool's dcId: " + rootVolDcId + " , podId: " + rootVolPodId + " , and clusterId: " + rootVolClusterId);
}
planChangedByVolume = true;
}
}
}
}
final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, template, offering, owner, params);
logBootModeParameters(params);
DeployDestination dest = null;
try {
dest = _dpMgr.planDeployment(vmProfile, plan, avoids, planner);
} catch (final AffinityConflictException e2) {
s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2);
throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict");
}
if (dest == null) {
if (planChangedByVolume) {
plan = originalPlan;
planChangedByVolume = false;
reuseVolume = false;
continue;
}
throw new InsufficientServerCapacityException("Unable to create a deployment for " + vmProfile, DataCenter.class, plan.getDataCenterId(), areAffinityGroupsAssociated(vmProfile));
}
avoids.addHost(dest.getHost().getId());
if (!template.isDeployAsIs()) {
journal.record("Deployment found - Attempt #" + (StartRetry.value() - retry), vmProfile, dest);
}
long destHostId = dest.getHost().getId();
vm.setPodIdToDeployIn(dest.getPod().getId());
final Long cluster_id = dest.getCluster().getId();
final ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, VmDetailConstants.CPU_OVER_COMMIT_RATIO);
final ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, VmDetailConstants.MEMORY_OVER_COMMIT_RATIO);
if (userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.CPU_OVER_COMMIT_RATIO) == null && (Float.parseFloat(cluster_detail_cpu.getValue()) > 1f || Float.parseFloat(cluster_detail_ram.getValue()) > 1f)) {
userVmDetailsDao.addDetail(vm.getId(), VmDetailConstants.CPU_OVER_COMMIT_RATIO, cluster_detail_cpu.getValue(), true);
userVmDetailsDao.addDetail(vm.getId(), VmDetailConstants.MEMORY_OVER_COMMIT_RATIO, cluster_detail_ram.getValue(), true);
} else if (userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.CPU_OVER_COMMIT_RATIO) != null) {
userVmDetailsDao.addDetail(vm.getId(), VmDetailConstants.CPU_OVER_COMMIT_RATIO, cluster_detail_cpu.getValue(), true);
userVmDetailsDao.addDetail(vm.getId(), VmDetailConstants.MEMORY_OVER_COMMIT_RATIO, cluster_detail_ram.getValue(), true);
}
vmProfile.setCpuOvercommitRatio(Float.parseFloat(cluster_detail_cpu.getValue()));
vmProfile.setMemoryOvercommitRatio(Float.parseFloat(cluster_detail_ram.getValue()));
StartAnswer startAnswer = null;
try {
if (!changeState(vm, Event.OperationRetry, destHostId, work, Step.Prepare)) {
throw new ConcurrentOperationException("Unable to update the state of the Virtual Machine " + vm.getUuid() + " oldstate: " + vm.getState() + "Event :" + Event.OperationRetry);
}
} catch (final NoTransitionException e1) {
throw new ConcurrentOperationException(e1.getMessage());
}
try {
resetVmNicsDeviceId(vm.getId());
_networkMgr.prepare(vmProfile, dest, ctx);
if (vm.getHypervisorType() != HypervisorType.BareMetal) {
volumeMgr.prepare(vmProfile, dest);
}
if (!reuseVolume) {
reuseVolume = true;
}
vmGuru.finalizeVirtualMachineProfile(vmProfile, dest, ctx);
final VirtualMachineTO vmTO = hvGuru.implement(vmProfile);
checkAndSetEnterSetupMode(vmTO, params);
handlePath(vmTO.getDisks(), vm.getHypervisorType());
Commands cmds = new Commands(Command.OnError.Stop);
cmds.addCommand(new StartCommand(vmTO, dest.getHost(), getExecuteInSequence(vm.getHypervisorType())));
vmGuru.finalizeDeployment(cmds, vmProfile, dest, ctx);
addExtraConfig(vmTO);
work = _workDao.findById(work.getId());
if (work == null || work.getStep() != Step.Prepare) {
throw new ConcurrentOperationException("Work steps have been changed: " + work);
}
_workDao.updateStep(work, Step.Starting);
_agentMgr.send(destHostId, cmds);
_workDao.updateStep(work, Step.Started);
startAnswer = cmds.getAnswer(StartAnswer.class);
if (startAnswer != null && startAnswer.getResult()) {
handlePath(vmTO.getDisks(), startAnswer.getIqnToData());
final String host_guid = startAnswer.getHost_guid();
if (host_guid != null) {
final HostVO finalHost = _resourceMgr.findHostByGuid(host_guid);
if (finalHost == null) {
throw new CloudRuntimeException("Host Guid " + host_guid + " doesn't exist in DB, something went wrong while processing start answer: " + startAnswer);
}
destHostId = finalHost.getId();
}
if (vmGuru.finalizeStart(vmProfile, destHostId, cmds, ctx)) {
syncDiskChainChange(startAnswer);
if (!changeState(vm, Event.OperationSucceeded, destHostId, work, Step.Done)) {
s_logger.error("Unable to transition to a new state. VM uuid: " + vm.getUuid() + "VM oldstate:" + vm.getState() + "Event:" + Event.OperationSucceeded);
throw new ConcurrentOperationException("Failed to deploy VM" + vm.getUuid());
}
final GPUDeviceTO gpuDevice = startAnswer.getVirtualMachine().getGpuDevice();
if (gpuDevice != null) {
_resourceMgr.updateGPUDetails(destHostId, gpuDevice.getGroupDetails());
}
if (userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.DEPLOY_VM) != null) {
userVmDetailsDao.removeDetail(vm.getId(), VmDetailConstants.DEPLOY_VM);
}
startedVm = vm;
if (s_logger.isDebugEnabled()) {
s_logger.debug("Start completed for VM " + vm);
}
final Host vmHost = _hostDao.findById(destHostId);
if (vmHost != null && (VirtualMachine.Type.ConsoleProxy.equals(vm.getType()) || VirtualMachine.Type.SecondaryStorageVm.equals(vm.getType())) && caManager.canProvisionCertificates()) {
final Map<String, String> sshAccessDetails = _networkMgr.getSystemVMAccessDetails(vm);
for (int retries = 3; retries > 0; retries--) {
try {
setupAgentSecurity(vmHost, sshAccessDetails, vm);
return;
} catch (final AgentUnavailableException | OperationTimedoutException e) {
s_logger.error("Retrying after catching exception while trying to secure agent for systemvm id=" + vm.getId(), e);
}
}
throw new CloudRuntimeException("Failed to setup and secure agent for systemvm id=" + vm.getId());
}
return;
} else {
if (s_logger.isDebugEnabled()) {
s_logger.info("The guru did not like the answers so stopping " + vm);
}
StopCommand stopCmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), false);
stopCmd.setControlIp(getControlNicIpForVM(vm));
Map<String, Boolean> vlanToPersistenceMap = getVlanToPersistenceMapForVM(vm.getId());
if (MapUtils.isNotEmpty(vlanToPersistenceMap)) {
stopCmd.setVlanToPersistenceMap(vlanToPersistenceMap);
}
final StopCommand cmd = stopCmd;
final Answer answer = _agentMgr.easySend(destHostId, cmd);
if (answer != null && answer instanceof StopAnswer) {
final StopAnswer stopAns = (StopAnswer) answer;
if (vm.getType() == VirtualMachine.Type.User) {
final String platform = stopAns.getPlatform();
if (platform != null) {
final Map<String, String> vmmetadata = new HashMap<>();
vmmetadata.put(vm.getInstanceName(), platform);
syncVMMetaData(vmmetadata);
}
}
}
if (answer == null || !answer.getResult()) {
s_logger.warn("Unable to stop " + vm + " due to " + (answer != null ? answer.getDetails() : "no answers"));
_haMgr.scheduleStop(vm, destHostId, WorkType.ForceStop);
throw new ExecutionException("Unable to stop this VM, " + vm.getUuid() + " so we are unable to retry the start operation");
}
throw new ExecutionException("Unable to start VM:" + vm.getUuid() + " due to error in finalizeStart, not retrying");
}
}
s_logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? " no start answer" : startAnswer.getDetails()));
if (startAnswer != null && startAnswer.getContextParam("stopRetry") != null) {
break;
}
} catch (OperationTimedoutException e) {
s_logger.debug("Unable to send the start command to host " + dest.getHost() + " failed to start VM: " + vm.getUuid());
if (e.isActive()) {
_haMgr.scheduleStop(vm, destHostId, WorkType.CheckStop);
}
canRetry = false;
throw new AgentUnavailableException("Unable to start " + vm.getHostName(), destHostId, e);
} catch (final ResourceUnavailableException e) {
s_logger.warn("Unable to contact resource.", e);
if (!avoids.add(e)) {
if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
throw e;
} else {
s_logger.warn("unexpected ResourceUnavailableException : " + e.getScope().getName(), e);
throw e;
}
}
} catch (final InsufficientCapacityException e) {
s_logger.warn("Insufficient capacity ", e);
if (!avoids.add(e)) {
if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
throw e;
} else {
s_logger.warn("unexpected InsufficientCapacityException : " + e.getScope().getName(), e);
}
}
} catch (ExecutionException | NoTransitionException e) {
s_logger.error("Failed to start instance " + vm, e);
throw new AgentUnavailableException("Unable to start instance due to " + e.getMessage(), destHostId, e);
} catch (final StorageAccessException e) {
s_logger.warn("Unable to access storage on host", e);
} finally {
if (startedVm == null && canRetry) {
final Step prevStep = work.getStep();
_workDao.updateStep(work, Step.Release);
if ((prevStep == Step.Started || prevStep == Step.Starting) && startAnswer != null && startAnswer.getResult()) {
cleanup(vmGuru, vmProfile, work, Event.OperationFailed, false);
} else {
cleanup(vmGuru, vmProfile, work, Event.OperationFailed, true);
}
}
}
}
} finally {
if (startedVm == null) {
if (VirtualMachine.Type.User.equals(vm.type) && ResourceCountRunningVMsonly.value()) {
resourceCountDecrement(owner.getAccountId(), new Long(offering.getCpu()), new Long(offering.getRamSize()));
}
if (canRetry) {
try {
changeState(vm, Event.OperationFailed, null, work, Step.Done);
} catch (final NoTransitionException e) {
throw new ConcurrentOperationException(e.getMessage());
}
}
}
if (planToDeploy != null) {
planToDeploy.setAvoids(avoids);
}
}
if (startedVm == null) {
throw new CloudRuntimeException("Unable to start instance '" + vm.getHostName() + "' (" + vm.getUuid() + "), see management server log for details");
}
}
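Note how orchestrateStart handles a StorageAccessException thrown by volumeMgr.prepare: it is only logged, so control falls through to the finally block, the work entry is moved to the Release step and cleaned up, and the outer while loop retries the start against another deployment. A condensed, hypothetical sketch of that shape (prepareAndStart and cleanupAttempt are illustrative names, not real methods):

    while (retry-- != 0) {
        try {
            prepareAndStart();   // includes volumeMgr.prepare(vmProfile, dest), which may throw StorageAccessException
            return;              // VM started, nothing left to do
        } catch (final StorageAccessException e) {
            s_logger.warn("Unable to access storage on host", e);
            // deliberately not rethrown: the loop will try the next candidate deployment
        } finally {
            cleanupAttempt();    // release the ItWorkVO entry and roll back the Prepare step
        }
    }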
Use of com.cloud.exception.StorageAccessException in project cloudstack by apache.
From the class VolumeServiceImpl, method createManagedVolumeCopyManagedTemplateAsync:
private void createManagedVolumeCopyManagedTemplateAsync(VolumeInfo volumeInfo, PrimaryDataStore destPrimaryDataStore, TemplateInfo srcTemplateOnPrimary, Host destHost, AsyncCallFuture<VolumeApiResult> future) throws StorageAccessException {
VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), srcTemplateOnPrimary.getId(), null);
if (templatePoolRef == null) {
throw new CloudRuntimeException("Failed to find template " + srcTemplateOnPrimary.getUniqueName() + " in storage pool " + srcTemplateOnPrimary.getId());
}
if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) {
throw new CloudRuntimeException("Template " + srcTemplateOnPrimary.getUniqueName() + " has not been downloaded to primary storage.");
}
String volumeDetailKey = "POOL_TEMPLATE_ID_COPY_ON_HOST_" + destHost.getId();
try {
try {
grantAccess(srcTemplateOnPrimary, destHost, destPrimaryDataStore);
} catch (Exception e) {
throw new StorageAccessException("Unable to grant access to src template: " + srcTemplateOnPrimary.getId() + " on host: " + destHost.getId());
}
_volumeDetailsDao.addDetail(volumeInfo.getId(), volumeDetailKey, String.valueOf(templatePoolRef.getId()), false);
// Create a volume on managed storage.
AsyncCallFuture<VolumeApiResult> createVolumeFuture = createVolumeAsync(volumeInfo, destPrimaryDataStore);
VolumeApiResult createVolumeResult = createVolumeFuture.get();
if (createVolumeResult.isFailed()) {
throw new CloudRuntimeException("Creation of a volume failed: " + createVolumeResult.getResult());
}
// Refresh the volume info from the DB.
volumeInfo = volFactory.getVolume(volumeInfo.getId(), destPrimaryDataStore);
Map<String, String> details = new HashMap<String, String>();
details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString());
details.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress());
details.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort()));
details.put(PrimaryDataStore.MANAGED_STORE_TARGET, volumeInfo.get_iScsiName());
details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, volumeInfo.getName());
details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(volumeInfo.getSize()));
details.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(destPrimaryDataStore.getId())));
destPrimaryDataStore.setDetails(details);
grantAccess(volumeInfo, destHost, destPrimaryDataStore);
volumeInfo.processEvent(Event.CreateRequested);
CreateVolumeFromBaseImageContext<VolumeApiResult> context = new CreateVolumeFromBaseImageContext<>(null, volumeInfo, destPrimaryDataStore, srcTemplateOnPrimary, future, null, null);
AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().createVolumeFromBaseManagedImageCallBack(null, null));
caller.setContext(context);
try {
motionSrv.copyAsync(srcTemplateOnPrimary, volumeInfo, destHost, caller);
} finally {
revokeAccess(volumeInfo, destHost, destPrimaryDataStore);
}
} catch (StorageAccessException e) {
throw e;
} catch (Throwable e) {
s_logger.debug("Failed to copy managed template on primary storage", e);
String errMsg = "Failed due to " + e.toString();
try {
destroyAndReallocateManagedVolume(volumeInfo);
} catch (CloudRuntimeException ex) {
s_logger.warn("Failed to destroy managed volume: " + volumeInfo.getId());
errMsg += " : " + ex.getMessage();
}
VolumeApiResult result = new VolumeApiResult(volumeInfo);
result.setResult(errMsg);
future.complete(result);
} finally {
_volumeDetailsDao.removeDetail(volumeInfo.getId(), volumeDetailKey);
List<VolumeDetailVO> volumeDetails = _volumeDetailsDao.findDetails(volumeDetailKey, String.valueOf(templatePoolRef.getId()), false);
if (volumeDetails == null || volumeDetails.isEmpty()) {
revokeAccess(srcTemplateOnPrimary, destHost, destPrimaryDataStore);
}
}
}
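The method above follows a grant/copy/revoke discipline: host access to the source template and the new volume is granted only for the duration of the copy; the volume's access is revoked in a finally block as soon as the copy has been dispatched, while the template's access is revoked later, only when no other pending copy (tracked through the volume-details marker) still needs it. A stripped-down sketch of the core of that pattern, reusing the calls shown above:

    grantAccess(srcTemplateOnPrimary, destHost, destPrimaryDataStore);           // template visible to the copy host
    grantAccess(volumeInfo, destHost, destPrimaryDataStore);                     // destination volume visible to the copy host
    try {
        motionSrv.copyAsync(srcTemplateOnPrimary, volumeInfo, destHost, caller); // hand the copy to the motion service
    } finally {
        revokeAccess(volumeInfo, destHost, destPrimaryDataStore);                // volume access is only needed while dispatching
    }
    // template access is revoked separately, once no other in-flight copy references it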
Use of com.cloud.exception.StorageAccessException in project cloudstack by apache.
From the class VolumeServiceImpl, method createManagedStorageTemplate:
@Override
public TemplateInfo createManagedStorageTemplate(long srcTemplateId, long destDataStoreId, long destHostId) throws StorageAccessException {
Host destHost = _hostDao.findById(destHostId);
if (destHost == null) {
throw new CloudRuntimeException("Destination host should not be null.");
}
TemplateInfo srcTemplateInfo = tmplFactory.getTemplate(srcTemplateId);
if (srcTemplateInfo == null) {
throw new CloudRuntimeException("Failed to get info of template: " + srcTemplateId);
}
if (Storage.ImageFormat.ISO.equals(srcTemplateInfo.getFormat())) {
throw new CloudRuntimeException("Unsupported format: " + Storage.ImageFormat.ISO.toString() + " for managed storage template");
}
GlobalLock lock = null;
TemplateInfo templateOnPrimary = null;
try {
String templateIdManagedPoolIdLockString = "templateId:" + srcTemplateId + "managedPoolId:" + destDataStoreId;
lock = GlobalLock.getInternLock(templateIdManagedPoolIdLockString);
if (lock == null) {
throw new CloudRuntimeException("Unable to create managed storage template, couldn't get global lock on " + templateIdManagedPoolIdLockString);
}
int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600);
if (!lock.lock(storagePoolMaxWaitSeconds)) {
s_logger.debug("Unable to create managed storage template, couldn't lock on " + templateIdManagedPoolIdLockString);
throw new CloudRuntimeException("Unable to create managed storage template, couldn't lock on " + templateIdManagedPoolIdLockString);
}
PrimaryDataStore destPrimaryDataStore = dataStoreMgr.getPrimaryDataStore(destDataStoreId);
// Check if the template already exists on the storage pool. If not, download it and copy it to the managed storage pool.
VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destDataStoreId, srcTemplateId, null);
if (templatePoolRef != null && templatePoolRef.getDownloadState() == Status.DOWNLOADED) {
return tmplFactory.getTemplate(srcTemplateId, destPrimaryDataStore);
}
templateOnPrimary = createManagedTemplateVolume(srcTemplateInfo, destPrimaryDataStore);
if (templateOnPrimary == null) {
throw new CloudRuntimeException("Failed to create template " + srcTemplateInfo.getUniqueName() + " on primary storage: " + destDataStoreId);
}
templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), null);
if (templatePoolRef == null) {
throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId());
}
if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) {
// Populate details which will be later read by the storage subsystem.
Map<String, String> details = new HashMap<>();
details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString());
details.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress());
details.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort()));
details.put(PrimaryDataStore.MANAGED_STORE_TARGET, templateOnPrimary.getInstallPath());
details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, srcTemplateInfo.getUniqueName());
details.put(PrimaryDataStore.REMOVE_AFTER_COPY, Boolean.TRUE.toString());
details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(templateOnPrimary.getSize()));
details.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(destPrimaryDataStore.getId())));
destPrimaryDataStore.setDetails(details);
try {
grantAccess(templateOnPrimary, destHost, destPrimaryDataStore);
} catch (Exception e) {
throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId());
}
templateOnPrimary.processEvent(Event.CopyingRequested);
try {
// Download and copy template to the managed volume
TemplateInfo templateOnPrimaryNow = tmplFactory.getReadyBypassedTemplateOnManagedStorage(srcTemplateId, templateOnPrimary, destDataStoreId, destHostId);
if (templateOnPrimaryNow == null) {
s_logger.debug("Failed to prepare ready bypassed template: " + srcTemplateId + " on primary storage: " + templateOnPrimary.getId());
throw new CloudRuntimeException("Failed to prepare ready bypassed template: " + srcTemplateId + " on primary storage: " + templateOnPrimary.getId());
}
templateOnPrimary.processEvent(Event.OperationSuccessed);
return templateOnPrimaryNow;
} finally {
revokeAccess(templateOnPrimary, destHost, destPrimaryDataStore);
}
}
return null;
} catch (StorageAccessException e) {
throw e;
} catch (Throwable e) {
s_logger.debug("Failed to create template on managed primary storage", e);
if (templateOnPrimary != null) {
templateOnPrimary.processEvent(Event.OperationFailed);
}
throw new CloudRuntimeException(e.getMessage());
} finally {
if (lock != null) {
lock.unlock();
lock.releaseRef();
}
}
}
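createManagedStorageTemplate serializes spooling per template and per managed pool with a GlobalLock, so two concurrent VM starts cannot copy the same template onto the same pool at once. A minimal sketch of that locking discipline, simplified from the method above (the wait value and the elided body are placeholders):

    GlobalLock lock = GlobalLock.getInternLock("templateId:" + srcTemplateId + "managedPoolId:" + destDataStoreId);
    try {
        if (!lock.lock(storagePoolMaxWaitSeconds)) {            // bounded wait, e.g. Config.StoragePoolMaxWaitSeconds
            throw new CloudRuntimeException("Unable to lock template/pool for spooling");
        }
        try {
            // ... create the template volume, grant host access, copy the template, revoke access ...
        } finally {
            lock.unlock();
        }
    } finally {
        lock.releaseRef();                                      // always drop the reference to the interned lock
    }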