Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult in project cloudstack by apache.
In the class VolumeOrchestrator, method createVolume:
@DB
public VolumeInfo createVolume(VolumeInfo volume, VirtualMachine vm, VirtualMachineTemplate template, DataCenter dc, Pod pod, Long clusterId, ServiceOffering offering, DiskOffering diskOffering, List<StoragePool> avoids, long size, HypervisorType hyperType) {
    // update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage)
    volume = volService.updateHypervisorSnapshotReserveForVolume(diskOffering, volume.getId(), hyperType);

    StoragePool pool = null;

    DiskProfile dskCh = null;
    if (volume.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat()) {
        dskCh = createDiskCharacteristics(volume, template, dc, offering);
        storageMgr.setDiskProfileThrottling(dskCh, offering, diskOffering);
    } else {
        dskCh = createDiskCharacteristics(volume, template, dc, diskOffering);
        storageMgr.setDiskProfileThrottling(dskCh, null, diskOffering);
    }

    if (diskOffering != null && diskOffering.isCustomized()) {
        dskCh.setSize(size);
    }

    dskCh.setHyperType(hyperType);

    final HashSet<StoragePool> avoidPools = new HashSet<StoragePool>(avoids);

    pool = findStoragePool(dskCh, dc, pod, clusterId, vm.getHostId(), vm, avoidPools);
    if (pool == null) {
        s_logger.warn("Unable to find suitable primary storage when creating volume " + volume.getName());
        throw new CloudRuntimeException("Unable to find suitable primary storage when creating volume " + volume.getName());
    }

    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Trying to create " + volume + " on " + pool);
    }

    DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
    for (int i = 0; i < 2; i++) {
        // retry one more time in case a template reload is required (VMware)
        AsyncCallFuture<VolumeApiResult> future = null;
        boolean isNotCreatedFromTemplate = volume.getTemplateId() == null;
        if (isNotCreatedFromTemplate) {
            future = volService.createVolumeAsync(volume, store);
        } else {
            TemplateInfo templ = tmplFactory.getTemplate(template.getId(), DataStoreRole.Image);
            future = volService.createVolumeFromTemplateAsync(volume, store.getId(), templ);
        }
        try {
            VolumeApiResult result = future.get();
            if (result.isFailed()) {
                if (result.getResult().contains("request template reload") && (i == 0)) {
                    s_logger.debug("Retry template re-deploy for VMware");
                    continue;
                } else {
                    s_logger.debug("create volume failed: " + result.getResult());
                    throw new CloudRuntimeException("create volume failed: " + result.getResult());
                }
            }
            return result.getVolume();
        } catch (InterruptedException e) {
            s_logger.error("create volume failed", e);
            throw new CloudRuntimeException("create volume failed", e);
        } catch (ExecutionException e) {
            s_logger.error("create volume failed", e);
            throw new CloudRuntimeException("create volume failed", e);
        }
    }
    throw new CloudRuntimeException("create volume failed even after template re-deploy");
}
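Every example on this page follows the same shape: submit the operation through VolumeService, block on the returned AsyncCallFuture, and inspect the VolumeApiResult. Below is a minimal sketch of that pattern in isolation, assuming a volService reference and an already-resolved VolumeInfo/DataStore pair are in scope; the helper name createAndWait is illustrative, not part of the CloudStack code.

// Minimal sketch of the common submit/await/check pattern (illustrative helper, not CloudStack code).
private VolumeInfo createAndWait(VolumeInfo volume, DataStore store) {
    AsyncCallFuture<VolumeApiResult> future = volService.createVolumeAsync(volume, store);
    try {
        VolumeApiResult result = future.get(); // blocks until the asynchronous volume operation completes
        if (result.isFailed()) {
            // getResult() carries the provider's error text, e.g. "request template reload"
            throw new CloudRuntimeException("Volume operation failed: " + result.getResult());
        }
        return result.getVolume();
    } catch (InterruptedException | ExecutionException e) {
        throw new CloudRuntimeException("Volume operation failed", e);
    }
}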
Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult in project cloudstack by apache.
In the class VolumeOrchestrator, method recreateVolume:
private Pair<VolumeVO, DataStore> recreateVolume(VolumeVO vol, VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException {
    VolumeVO newVol;
    boolean recreate = RecreatableSystemVmEnabled.value();
    DataStore destPool = null;
    if (recreate && (dest.getStorageForDisks() == null || dest.getStorageForDisks().get(vol) == null)) {
        destPool = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
        s_logger.debug("existing pool: " + destPool.getId());
    } else {
        StoragePool pool = dest.getStorageForDisks().get(vol);
        destPool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
    }
    if (vol.getState() == Volume.State.Allocated || vol.getState() == Volume.State.Creating) {
        newVol = vol;
    } else {
        newVol = switchVolume(vol, vm);
        // re-key the disk-to-pool mapping so it points at the new volume instead of the old one
        if (dest.getStorageForDisks() != null && dest.getStorageForDisks().containsKey(vol)) {
            StoragePool poolWithOldVol = dest.getStorageForDisks().get(vol);
            dest.getStorageForDisks().put(newVol, poolWithOldVol);
            dest.getStorageForDisks().remove(vol);
        }
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Created new volume " + newVol + " for old volume " + vol);
        }
    }
    VolumeInfo volume = volFactory.getVolume(newVol.getId(), destPool);
    Long templateId = newVol.getTemplateId();
    for (int i = 0; i < 2; i++) {
        // retry one more time in case a template reload is required (VMware)
        AsyncCallFuture<VolumeApiResult> future = null;
        if (templateId == null) {
            DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
            HypervisorType hyperType = vm.getVirtualMachine().getHypervisorType();
            // update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage)
            volService.updateHypervisorSnapshotReserveForVolume(diskOffering, volume.getId(), hyperType);
            volume = volFactory.getVolume(newVol.getId(), destPool);
            future = volService.createVolumeAsync(volume, destPool);
        } else {
            TemplateInfo templ = tmplFactory.getReadyTemplateOnImageStore(templateId, dest.getDataCenter().getId());
            if (templ == null) {
                s_logger.debug("Can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId());
                throw new CloudRuntimeException("Can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId());
            }
            PrimaryDataStore primaryDataStore = (PrimaryDataStore)destPool;
            if (primaryDataStore.isManaged()) {
                DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
                HypervisorType hyperType = vm.getVirtualMachine().getHypervisorType();
                // update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage)
                volService.updateHypervisorSnapshotReserveForVolume(diskOffering, volume.getId(), hyperType);
                long hostId = vm.getVirtualMachine().getHostId();
                future = volService.createManagedStorageVolumeFromTemplateAsync(volume, destPool.getId(), templ, hostId);
            } else {
                future = volService.createVolumeFromTemplateAsync(volume, destPool.getId(), templ);
            }
        }
        VolumeApiResult result;
        try {
            result = future.get();
            if (result.isFailed()) {
                if (result.getResult().contains("request template reload") && (i == 0)) {
                    s_logger.debug("Retry template re-deploy for VMware");
                    continue;
                } else {
                    s_logger.debug("Unable to create " + newVol + ": " + result.getResult());
                    throw new StorageUnavailableException("Unable to create " + newVol + ": " + result.getResult(), destPool.getId());
                }
            }
            StoragePoolVO storagePool = _storagePoolDao.findById(destPool.getId());
            if (storagePool.isManaged()) {
                long hostId = vm.getVirtualMachine().getHostId();
                Host host = _hostDao.findById(hostId);
                volService.grantAccess(volFactory.getVolume(newVol.getId()), host, destPool);
            }
            newVol = _volsDao.findById(newVol.getId());
            // break out of the template-redeploy retry loop
            break;
        } catch (InterruptedException | ExecutionException e) {
            s_logger.error("Unable to create " + newVol, e);
            throw new StorageUnavailableException("Unable to create " + newVol + ": " + e.toString(), destPool.getId());
        }
    }
    return new Pair<VolumeVO, DataStore>(newVol, destPool);
}
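On managed primary storage the recreated volume is explicitly granted to the destination host before it is used. When the host no longer needs the volume (for example after a detach), that grant is normally released again; the following is only a sketch of the complementary call, assuming VolumeService's revokeAccess takes the same (DataObject, Host, DataStore) arguments as the grantAccess call above.

// Sketch: releasing a managed-storage grant once the host no longer needs the volume
// (revokeAccess is assumed to mirror the grantAccess signature used in recreateVolume).
StoragePoolVO storagePool = _storagePoolDao.findById(destPool.getId());
if (storagePool.isManaged()) {
    Host host = _hostDao.findById(vm.getVirtualMachine().getHostId());
    volService.revokeAccess(volFactory.getVolume(newVol.getId()), host, destPool);
}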
Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult in project cloudstack by apache.
In the class VolumeOrchestrator, method migrateVolume:
@Override
@DB
public Volume migrateVolume(Volume volume, StoragePool destPool) throws StorageUnavailableException {
    VolumeInfo vol = volFactory.getVolume(volume.getId());
    AsyncCallFuture<VolumeApiResult> future = volService.copyVolume(vol, (DataStore)destPool);
    try {
        VolumeApiResult result = future.get();
        if (result.isFailed()) {
            s_logger.error("Migrate volume failed: " + result.getResult());
            throw new StorageUnavailableException("Migrate volume failed: " + result.getResult(), destPool.getId());
        } else {
            // update the volumeId for snapshots on secondary storage
            if (!_snapshotDao.listByVolumeId(vol.getId()).isEmpty()) {
                _snapshotDao.updateVolumeIds(vol.getId(), result.getVolume().getId());
                _snapshotDataStoreDao.updateVolumeIds(vol.getId(), result.getVolume().getId());
            }
        }
        return result.getVolume();
    } catch (InterruptedException e) {
        s_logger.debug("migrate volume failed", e);
        throw new CloudRuntimeException(e.getMessage());
    } catch (ExecutionException e) {
        s_logger.debug("migrate volume failed", e);
        throw new CloudRuntimeException(e.getMessage());
    }
}
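A caller-side usage example might look like the following; volumeOrchestrator, volume, and destinationPool are placeholders for whatever the calling code already holds, not names from the original source.

// Illustrative caller: move a volume to another primary storage pool and handle failure.
try {
    Volume migrated = volumeOrchestrator.migrateVolume(volume, destinationPool);
    s_logger.info("Volume " + migrated.getUuid() + " now resides on pool " + destinationPool.getId());
} catch (StorageUnavailableException e) {
    // the destination pool could not take the copy; surface the error to the API caller
    s_logger.warn("Migration failed for volume " + volume.getId(), e);
}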
Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult in project cloudstack by apache.
In the class VolumeOrchestrator, method copyVolumeFromSecToPrimary:
@DB
public VolumeInfo copyVolumeFromSecToPrimary(VolumeInfo volume, VirtualMachine vm, VirtualMachineTemplate template, DataCenter dc, Pod pod, Long clusterId, ServiceOffering offering, DiskOffering diskOffering, List<StoragePool> avoids, long size, HypervisorType hyperType) throws NoTransitionException {
    final HashSet<StoragePool> avoidPools = new HashSet<StoragePool>(avoids);
    DiskProfile dskCh = createDiskCharacteristics(volume, template, dc, diskOffering);
    dskCh.setHyperType(vm.getHypervisorType());
    storageMgr.setDiskProfileThrottling(dskCh, null, diskOffering);

    // Find a suitable storage to create the volume on
    StoragePool destPool = findStoragePool(dskCh, dc, pod, clusterId, null, vm, avoidPools);
    DataStore destStore = dataStoreMgr.getDataStore(destPool.getId(), DataStoreRole.Primary);
    AsyncCallFuture<VolumeApiResult> future = volService.copyVolume(volume, destStore);

    try {
        VolumeApiResult result = future.get();
        if (result.isFailed()) {
            s_logger.debug("copy volume failed: " + result.getResult());
            throw new CloudRuntimeException("copy volume failed: " + result.getResult());
        }
        return result.getVolume();
    } catch (InterruptedException e) {
        s_logger.debug("Failed to copy volume: " + volume.getId(), e);
        throw new CloudRuntimeException("Failed to copy volume", e);
    } catch (ExecutionException e) {
        s_logger.debug("Failed to copy volume: " + volume.getId(), e);
        throw new CloudRuntimeException("Failed to copy volume", e);
    }
}
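Unlike createVolume above, this method uses the pool returned by findStoragePool without a null check, so a missing candidate would surface as a NullPointerException at destPool.getId(). A defensive guard in the same style as createVolume could look like this; it is a sketch, not part of the original method.

// Sketch: guard against findStoragePool returning no candidate, mirroring createVolume's handling.
StoragePool destPool = findStoragePool(dskCh, dc, pod, clusterId, null, vm, avoidPools);
if (destPool == null) {
    s_logger.warn("Unable to find suitable primary storage when copying volume " + volume.getName());
    throw new CloudRuntimeException("Unable to find suitable primary storage when copying volume " + volume.getName());
}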
Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult in project cloudstack by apache.
In the class VolumeOrchestrator, method cleanupVolumes:
@Override
@DB
public void cleanupVolumes(long vmId) throws ConcurrentOperationException {
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Cleaning storage for vm: " + vmId);
    }
    final List<VolumeVO> volumesForVm = _volsDao.findByInstance(vmId);
    final List<VolumeVO> toBeExpunged = new ArrayList<VolumeVO>();

    Transaction.execute(new TransactionCallbackNoReturn() {
        @Override
        public void doInTransactionWithoutResult(TransactionStatus status) {
            for (VolumeVO vol : volumesForVm) {
                if (vol.getVolumeType().equals(Type.ROOT)) {
                    // Destroy the volume if not already destroyed
                    boolean volumeAlreadyDestroyed = (vol.getState() == Volume.State.Destroy || vol.getState() == Volume.State.Expunged || vol.getState() == Volume.State.Expunging);
                    if (!volumeAlreadyDestroyed) {
                        volService.destroyVolume(vol.getId());
                    } else {
                        s_logger.debug("Skipping destroy for the volume " + vol + " as it is in state " + vol.getState().toString());
                    }
                    toBeExpunged.add(vol);
                } else {
                    if (s_logger.isDebugEnabled()) {
                        s_logger.debug("Detaching " + vol);
                    }
                    _volsDao.detachVolume(vol.getId());
                }
            }
        }
    });

    AsyncCallFuture<VolumeApiResult> future = null;
    for (VolumeVO expunge : toBeExpunged) {
        future = volService.expungeVolumeAsync(volFactory.getVolume(expunge.getId()));
        try {
            future.get();
        } catch (InterruptedException e) {
            s_logger.debug("Failed to expunge volume " + expunge.getId(), e);
        } catch (ExecutionException e) {
            s_logger.debug("Failed to expunge volume " + expunge.getId(), e);
        }
    }
}
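One detail worth noting in the expunge loop: an InterruptedException is only logged, which drops the thread's interrupt status. A common variant restores the flag before moving on; the following sketch reuses the same names from the method above and is not the project's code.

// Sketch: same expunge loop, but preserving the interrupt signal for the caller.
for (VolumeVO expunge : toBeExpunged) {
    AsyncCallFuture<VolumeApiResult> future = volService.expungeVolumeAsync(volFactory.getVolume(expunge.getId()));
    try {
        future.get();
    } catch (InterruptedException e) {
        s_logger.debug("Failed to expunge volume " + expunge.getId(), e);
        Thread.currentThread().interrupt(); // restore the interrupt flag
    } catch (ExecutionException e) {
        s_logger.debug("Failed to expunge volume " + expunge.getId(), e);
    }
}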