Use of org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd in project cloudstack by apache.
From the class VolumeApiServiceImpl, method changeDiskOfferingForVolumeInternal.
private Volume changeDiskOfferingForVolumeInternal(VolumeVO volume, Long newDiskOfferingId, Long newSize, Long newMinIops, Long newMaxIops, boolean autoMigrateVolume, boolean shrinkOk) throws ResourceAllocationException {
    DiskOfferingVO existingDiskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
    DiskOfferingVO newDiskOffering = _diskOfferingDao.findById(newDiskOfferingId);
    Integer newHypervisorSnapshotReserve = null;
    boolean volumeMigrateRequired = false;
    boolean volumeResizeRequired = false;

    // VALIDATIONS
    // Single-element arrays act as mutable holders so the validation call can adjust size, IOPS and snapshot reserve in place.
    Long[] updateNewSize = { newSize };
    Long[] updateNewMinIops = { newMinIops };
    Long[] updateNewMaxIops = { newMaxIops };
    Integer[] updateNewHypervisorSnapshotReserve = { newHypervisorSnapshotReserve };
    validateVolumeResizeWithNewDiskOfferingAndLoad(volume, existingDiskOffering, newDiskOffering, updateNewSize, updateNewMinIops, updateNewMaxIops, updateNewHypervisorSnapshotReserve);
    newSize = updateNewSize[0];
    newMinIops = updateNewMinIops[0];
    newMaxIops = updateNewMaxIops[0];
    newHypervisorSnapshotReserve = updateNewHypervisorSnapshotReserve[0];

    long currentSize = volume.getSize();
    validateVolumeResizeWithSize(volume, currentSize, newSize, shrinkOk);
    // We need to publish this event to usage_volume table
    if (volume.getState() == Volume.State.Allocated) {
        s_logger.debug(String.format("Volume %s is in the allocated state, but has never been created. Simply updating database with new size and IOPS.", volume.getUuid()));
        volume.setSize(newSize);
        volume.setMinIops(newMinIops);
        volume.setMaxIops(newMaxIops);
        volume.setHypervisorSnapshotReserve(newHypervisorSnapshotReserve);
        if (newDiskOffering != null) {
            volume.setDiskOfferingId(newDiskOfferingId);
        }
        _volsDao.update(volume.getId(), volume);
        if (currentSize != newSize) {
            UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_RESIZE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), volume.getDiskOfferingId(), volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid());
        }
        return volume;
    }
    // Use value comparison for the boxed IOPS values; '!=' would compare Long references.
    if (currentSize != newSize || !Objects.equals(newMaxIops, volume.getMaxIops()) || !Objects.equals(newMinIops, volume.getMinIops())) {
        volumeResizeRequired = true;
        validateVolumeReadyStateAndHypervisorChecks(volume, currentSize, newSize);
    }
    StoragePoolVO existingStoragePool = _storagePoolDao.findById(volume.getPoolId());
    Pair<List<? extends StoragePool>, List<? extends StoragePool>> poolsPair = managementService.listStoragePoolsForMigrationOfVolumeInternal(volume.getId(), newDiskOffering.getId(), newSize, newMinIops, newMaxIops, true);
    List<? extends StoragePool> suitableStoragePools = poolsPair.second();

    // Migration is required when the volume's current pool is not among the suitable pools for the new offering.
    if (suitableStoragePools.stream().noneMatch(p -> p.getId() == existingStoragePool.getId())) {
        volumeMigrateRequired = true;
        if (!autoMigrateVolume) {
            throw new InvalidParameterValueException(String.format("Failed to change offering for volume %s since automigrate is set to false but the volume needs to be migrated", volume.getUuid()));
        }
    }
    if (!volumeMigrateRequired && !volumeResizeRequired) {
        _volsDao.updateDiskOffering(volume.getId(), newDiskOffering.getId());
        volume = _volsDao.findById(volume.getId());
        return volume;
    }
    if (volumeMigrateRequired) {
        if (CollectionUtils.isEmpty(poolsPair.first()) && CollectionUtils.isEmpty(poolsPair.second())) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Volume change offering operation failed for volume ID: %s as no suitable pool(s) were found for migrating to support the new disk offering", volume.getUuid()));
        }
        // Pick a random suitable pool as the migration target.
        Collections.shuffle(suitableStoragePools);
        MigrateVolumeCmd migrateVolumeCmd = new MigrateVolumeCmd(volume.getId(), suitableStoragePools.get(0).getId(), newDiskOffering.getId(), true);
        // Capture the UUID first: migrateVolume may return null, and the error messages below would otherwise NPE on volume.getUuid().
        final String volumeUuid = volume.getUuid();
        try {
            volume = (VolumeVO) migrateVolume(migrateVolumeCmd);
            if (volume == null) {
                throw new CloudRuntimeException(String.format("Volume change offering operation failed for volume ID: %s, migration to storage pool %s failed", volumeUuid, suitableStoragePools.get(0).getId()));
            }
        } catch (Exception e) {
            throw new CloudRuntimeException(String.format("Volume change offering operation failed for volume ID: %s, migration to storage pool %s failed due to %s", volumeUuid, suitableStoragePools.get(0).getId(), e.getMessage()));
        }
    }
    if (volumeResizeRequired) {
        // Refresh volume data after a possible migration.
        volume = _volsDao.findById(volume.getId());
        try {
            volume = resizeVolumeInternal(volume, newDiskOffering, currentSize, newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, shrinkOk);
        } catch (Exception e) {
            if (volumeMigrateRequired) {
                s_logger.warn(String.format("Volume change offering operation succeeded for volume ID: %s but the volume resize operation failed, so please retry the resize operation separately", volume.getUuid()));
            } else {
                throw new CloudRuntimeException(String.format("Volume change offering operation failed for volume ID: %s because the resize volume operation failed", volume.getUuid()));
            }
        }
    }
    return volume;
}
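
For readers interested specifically in the MigrateVolumeCmd call, the pattern above can be distilled into the minimal sketch below. This is an illustration only, not project code: the helper names selectTargetPool and migrateToSuitablePool are hypothetical, the four-argument MigrateVolumeCmd constructor and the migrateVolume(MigrateVolumeCmd) call are assumed to behave exactly as they do in the method above, and the meaning of the final boolean argument (passed as true above) is not asserted here.

// Hypothetical helpers for illustration only; the CloudStack types (Volume, VolumeVO,
// StoragePool, VolumeApiServiceImpl) are assumed to be on the classpath.
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd;

public class MigrateVolumeCmdUsageSketch {

    // Pick a random pool from the suitable candidates, mirroring the
    // Collections.shuffle(...) + get(0) selection in the method above.
    static StoragePool selectTargetPool(List<? extends StoragePool> suitablePools) {
        List<StoragePool> candidates = new ArrayList<>(suitablePools);
        Collections.shuffle(candidates);
        return candidates.get(0);
    }

    // Build the command the same way changeDiskOfferingForVolumeInternal does and hand it to the volume service.
    static Volume migrateToSuitablePool(VolumeVO volume, long newDiskOfferingId, List<? extends StoragePool> suitablePools, VolumeApiServiceImpl volumeApiService) {
        StoragePool target = selectTargetPool(suitablePools);
        MigrateVolumeCmd cmd = new MigrateVolumeCmd(volume.getId(), target.getId(), newDiskOfferingId, true);
        return volumeApiService.migrateVolume(cmd);
    }
}

Shuffling the candidate list before taking the first element spreads migrations across all suitable pools rather than always targeting the first pool returned by the management service.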