Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by apache.
The class SolidFirePrimaryDataStoreDriver, method verifySufficientBytesForStoragePool:
private void verifySufficientBytesForStoragePool(long storagePoolId, long volumeId, long newSize, Integer newHypervisorSnapshotReserve) {
    DataStore primaryDataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
    VolumeInfo volumeInfo = volumeFactory.getVolume(volumeId, primaryDataStore);
    StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
    long currentSizeWithHsr = getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, storagePool);

    newHypervisorSnapshotReserve = newHypervisorSnapshotReserve == null ? LOWEST_HYPERVISOR_SNAPSHOT_RESERVE : Math.max(newHypervisorSnapshotReserve, LOWEST_HYPERVISOR_SNAPSHOT_RESERVE);

    long newSizeWithHsr = (long) (newSize + newSize * (newHypervisorSnapshotReserve / 100f));

    if (newSizeWithHsr < currentSizeWithHsr) {
        throw new CloudRuntimeException("Storage pool " + storagePoolId + " does not support shrinking a volume.");
    }

    long availableBytes = storagePool.getCapacityBytes() - getUsedBytes(storagePool);

    if ((newSizeWithHsr - currentSizeWithHsr) > availableBytes) {
        throw new CloudRuntimeException("Storage pool " + storagePoolId + " does not have enough space to expand the volume.");
    }
}
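The sizing check above reduces to one formula: pad the requested size by the hypervisor snapshot reserve (HSR) percentage, never letting the reserve drop below a floor, then compare the padded delta against the pool's free bytes. A standalone sketch of just that arithmetic (the floor value of 10 and the sizes are illustrative assumptions, not necessarily the driver's constants):

public class HsrSizing {
    // Illustrative floor for the reserve percentage; the real
    // LOWEST_HYPERVISOR_SNAPSHOT_RESERVE constant lives in the SolidFire driver.
    private static final int LOWEST_HYPERVISOR_SNAPSHOT_RESERVE = 10;

    static long sizeWithHsr(long sizeInBytes, Integer hsrPercent) {
        // Null means "use the floor"; otherwise never go below the floor.
        int reserve = hsrPercent == null
                ? LOWEST_HYPERVISOR_SNAPSHOT_RESERVE
                : Math.max(hsrPercent, LOWEST_HYPERVISOR_SNAPSHOT_RESERVE);
        // Same formula as the driver: pad the size by the reserve percentage.
        return (long) (sizeInBytes + sizeInBytes * (reserve / 100f));
    }

    public static void main(String[] args) {
        long tenGiB = 10L * 1024 * 1024 * 1024;
        // A 25% reserve on 10 GiB means 12.5 GiB must fit in the pool.
        System.out.println(sizeWithHsr(tenGiB, 25));
    }
}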
Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by apache.
The class DeploymentPlanningManagerImpl, method findSuitablePoolsForVolumes:
protected Pair<Map<Volume, List<StoragePool>>, List<Volume>> findSuitablePoolsForVolumes(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
    List<VolumeVO> volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId());
    Map<Volume, List<StoragePool>> suitableVolumeStoragePools = new HashMap<Volume, List<StoragePool>>();
    List<Volume> readyAndReusedVolumes = new ArrayList<Volume>();

    // There should be at least the ROOT volume of the VM in usable state
    if (volumesTobeCreated.isEmpty()) {
        throw new CloudRuntimeException("Unable to create deployment, no usable volumes found for the VM");
    }

    // Don't allow starting a VM that doesn't have a ROOT volume
    if (_volsDao.findByInstanceAndType(vmProfile.getId(), Volume.Type.ROOT).isEmpty()) {
        throw new CloudRuntimeException("Unable to prepare volumes for vm as ROOT volume is missing");
    }

    // For each volume, find the list of suitable storage pools by calling the allocators
    Set<Long> originalAvoidPoolSet = avoid.getPoolsToAvoid();
    if (originalAvoidPoolSet == null) {
        originalAvoidPoolSet = new HashSet<Long>();
    }
    Set<Long> poolsToAvoidOutput = new HashSet<Long>(originalAvoidPoolSet);

    for (VolumeVO toBeCreated : volumesTobeCreated) {
        s_logger.debug("Checking suitable pools for volume (Id, Type): (" + toBeCreated.getId() + "," + toBeCreated.getVolumeType().name() + ")");

        // Check whether the volume already has a pool allocated and whether that pool can
        // be reused.
        if (plan.getPoolId() != null || (toBeCreated.getVolumeType() == Volume.Type.DATADISK && toBeCreated.getPoolId() != null && toBeCreated.getState() == Volume.State.Ready)) {
            s_logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: " + toBeCreated.getPoolId());
            List<StoragePool> suitablePools = new ArrayList<StoragePool>();
            StoragePool pool = null;
            if (toBeCreated.getPoolId() != null) {
                pool = (StoragePool) dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId());
            } else {
                pool = (StoragePool) dataStoreMgr.getPrimaryDataStore(plan.getPoolId());
            }
            if (!pool.isInMaintenance()) {
                if (!avoid.shouldAvoid(pool)) {
                    long exstPoolDcId = pool.getDataCenterId();
                    long exstPoolPodId = pool.getPodId() != null ? pool.getPodId() : -1;
                    long exstPoolClusterId = pool.getClusterId() != null ? pool.getClusterId() : -1;
                    boolean canReusePool = false;
                    if (plan.getDataCenterId() == exstPoolDcId && plan.getPodId() == exstPoolPodId && plan.getClusterId() == exstPoolClusterId) {
                        canReusePool = true;
                    } else if (plan.getDataCenterId() == exstPoolDcId) {
                        DataStore dataStore = dataStoreMgr.getPrimaryDataStore(pool.getId());
                        if (dataStore != null && dataStore.getScope() != null && dataStore.getScope().getScopeType() == ScopeType.ZONE) {
                            canReusePool = true;
                        }
                    } else {
                        s_logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume");
                        canReusePool = false;
                    }
                    if (canReusePool) {
                        s_logger.debug("Planner need not allocate a pool for this volume since it's READY");
                        suitablePools.add(pool);
                        suitableVolumeStoragePools.put(toBeCreated, suitablePools);
                        if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) {
                            readyAndReusedVolumes.add(toBeCreated);
                        }
                        continue;
                    }
                } else {
                    s_logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume");
                }
            } else {
                s_logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume");
            }
        }

        if (s_logger.isDebugEnabled()) {
            s_logger.debug("We need to allocate new storagepool for this volume");
        }
        if (!isRootAdmin(vmProfile)) {
            if (!isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())) {
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled");
                    s_logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning.");
                }
                // Cannot find suitable storage pools under this cluster for this volume
                // since allocation_state is disabled; remove any suitable pools found for
                // other volumes. All volumes should get suitable pools under this cluster,
                // else we can't use this cluster.
                suitableVolumeStoragePools.clear();
                break;
            }
        }

        s_logger.debug("Calling StoragePoolAllocators to find suitable pools");
        DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId());
        if (vmProfile.getTemplate().getFormat() == Storage.ImageFormat.ISO && vmProfile.getServiceOffering().getTagsArray().length != 0) {
            diskOffering.setTagsArray(Arrays.asList(vmProfile.getServiceOffering().getTagsArray()));
        }
        DiskProfile diskProfile = new DiskProfile(toBeCreated, diskOffering, vmProfile.getHypervisorType());
        boolean useLocalStorage = false;
        if (vmProfile.getType() != VirtualMachine.Type.User) {
            DataCenterVO zone = _dcDao.findById(plan.getDataCenterId());
            assert (zone != null) : "Invalid zone in deployment plan";
            Boolean useLocalStorageForSystemVM = ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(zone.getId());
            if (useLocalStorageForSystemVM != null) {
                useLocalStorage = useLocalStorageForSystemVM.booleanValue();
                s_logger.debug("System VMs will use " + (useLocalStorage ? "local" : "shared") + " storage for zone id=" + plan.getDataCenterId());
            }
        } else {
            useLocalStorage = diskOffering.getUseLocalStorage();
            // Also honor the local-storage flag of the service offering when it is a ROOT disk
            if (!useLocalStorage && vmProfile.getServiceOffering().getUseLocalStorage()) {
                if (toBeCreated.getVolumeType() == Volume.Type.ROOT) {
                    useLocalStorage = true;
                }
            }
        }
        diskProfile.setUseLocalStorage(useLocalStorage);

        boolean foundPotentialPools = false;
        for (StoragePoolAllocator allocator : _storagePoolAllocators) {
            final List<StoragePool> suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, returnUpTo);
            if (suitablePools != null && !suitablePools.isEmpty()) {
                suitableVolumeStoragePools.put(toBeCreated, suitablePools);
                foundPotentialPools = true;
                break;
            }
        }

        if (avoid.getPoolsToAvoid() != null) {
            poolsToAvoidOutput.addAll(avoid.getPoolsToAvoid());
            avoid.getPoolsToAvoid().retainAll(originalAvoidPoolSet);
        }

        if (!foundPotentialPools) {
            s_logger.debug("No suitable pools found for volume: " + toBeCreated + " under cluster: " + plan.getClusterId());
            // No suitable storage pools found under this cluster for this volume;
            // remove any suitable pools found for other volumes. All volumes should
            // get suitable pools under this cluster, else we can't use this cluster.
            suitableVolumeStoragePools.clear();
            break;
        }
    }

    HashSet<Long> toRemove = new HashSet<Long>();
    for (List<StoragePool> lsp : suitableVolumeStoragePools.values()) {
        for (StoragePool sp : lsp) {
            toRemove.add(sp.getId());
        }
    }
    poolsToAvoidOutput.removeAll(toRemove);

    if (avoid.getPoolsToAvoid() != null) {
        avoid.getPoolsToAvoid().addAll(poolsToAvoidOutput);
    }

    if (suitableVolumeStoragePools.isEmpty()) {
        s_logger.debug("No suitable pools found");
    }

    return new Pair<Map<Volume, List<StoragePool>>, List<Volume>>(suitableVolumeStoragePools, readyAndReusedVolumes);
}
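One detail worth calling out is the avoid-set bookkeeping: the method snapshots the caller's pools-to-avoid up front, lets each allocator pass add its own entries, resets the set back to the snapshot between volumes, and at the end re-adds everything that was avoided except pools that turned out suitable for some volume. A standalone sketch of that pattern with plain Long IDs (the values and the loop driving it are illustrative, not CloudStack APIs):

import java.util.*;

public class AvoidSetBookkeeping {
    public static void main(String[] args) {
        // Caller's original avoid set; snapshot it before the per-volume loop.
        Set<Long> avoid = new HashSet<>(List.of(1L));
        Set<Long> original = new HashSet<>(avoid);
        Set<Long> avoidOutput = new HashSet<>(original);

        // Simulate two allocator passes, each marking transient avoid entries.
        List<Set<Long>> perVolumeAvoids = List.of(Set.of(2L, 3L), Set.of(3L, 4L));
        for (Set<Long> added : perVolumeAvoids) {
            avoid.addAll(added);          // allocator marked these pools
            avoidOutput.addAll(avoid);    // remember them for the final result
            avoid.retainAll(original);    // reset so the next volume starts clean
        }

        // Pools that proved suitable for some volume must not stay avoided.
        Set<Long> suitable = Set.of(3L);
        avoidOutput.removeAll(suitable);
        avoid.addAll(avoidOutput);

        System.out.println(avoid); // final avoid set, e.g. [1, 2, 4]
    }
}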
Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by apache.
The class SwiftImageStoreDriverImpl, method createAsync:
@Override
public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
    Long maxTemplateSizeInBytes = getMaxTemplateSizeInBytes();
    VirtualMachineTemplate tmpl = _templateDao.findById(data.getId());
    DataStore cacheStore = cacheManager.getCacheStorage(dataStore.getScope());
    DownloadCommand dcmd = new DownloadCommand((TemplateObjectTO) (data.getTO()), maxTemplateSizeInBytes);
    dcmd.setCacheStore(cacheStore.getTO());
    dcmd.setProxy(getHttpProxy());

    EndPoint ep = _epSelector.select(data);
    if (ep == null) {
        String errMsg = "No remote endpoint to send command, check if host or ssvm is down?";
        s_logger.error(errMsg);
        throw new CloudRuntimeException(errMsg);
    }

    CreateContext<CreateCmdResult> context = new CreateContext<CreateCmdResult>(callback, data);
    AsyncCallbackDispatcher<SwiftImageStoreDriverImpl, DownloadAnswer> caller = AsyncCallbackDispatcher.create(this);
    caller.setContext(context);
    if (data.getType() == DataObjectType.TEMPLATE) {
        caller.setCallback(caller.getTarget().createTemplateAsyncCallback(null, null));
    } else if (data.getType() == DataObjectType.VOLUME) {
        caller.setCallback(caller.getTarget().createVolumeAsyncCallback(null, null));
    }
    ep.sendMessageAsync(dcmd, caller);
}
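The dispatcher pattern above (build a command, pick an endpoint, register a typed callback, fire asynchronously) maps naturally onto standard-library futures. A minimal sketch of the same shape using CompletableFuture rather than CloudStack's AsyncCallbackDispatcher (every name here is an illustrative stand-in, not a CloudStack type):

import java.util.concurrent.CompletableFuture;

public class AsyncDispatchSketch {
    // Stand-ins for DownloadCommand / DownloadAnswer.
    record Command(String payload) {}
    record Answer(boolean success, String details) {}

    // Stand-in for EndPoint.sendMessageAsync: run the command off-thread.
    static CompletableFuture<Answer> sendMessageAsync(Command cmd) {
        return CompletableFuture.supplyAsync(() -> new Answer(true, "downloaded " + cmd.payload()));
    }

    public static void main(String[] args) {
        Command cmd = new Command("template-42");
        // The per-type callback chosen in createAsync() becomes a
        // thenAccept() continuation here.
        sendMessageAsync(cmd)
                .thenAccept(answer -> System.out.println("callback got: " + answer))
                .join(); // block only so the demo prints before the JVM exits
    }
}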
Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by apache.
The class VolumeApiServiceImpl, method uploadVolume:
@Override
@ActionEvent(eventType = EventTypes.EVENT_VOLUME_UPLOAD, eventDescription = "uploading volume for post upload", async = true)
public GetUploadParamsResponse uploadVolume(final GetUploadParamsForVolumeCmd cmd) throws ResourceAllocationException, MalformedURLException {
    Account caller = CallContext.current().getCallingAccount();
    long ownerId = cmd.getEntityOwnerId();
    final Account owner = _entityMgr.findById(Account.class, ownerId);
    final Long zoneId = cmd.getZoneId();
    final String volumeName = cmd.getName();
    String format = cmd.getFormat();
    final Long diskOfferingId = cmd.getDiskOfferingId();
    String imageStoreUuid = cmd.getImageStoreUuid();
    final DataStore store = _tmpltMgr.getImageStore(imageStoreUuid, zoneId);

    validateVolume(caller, ownerId, zoneId, volumeName, null, format, diskOfferingId);

    return Transaction.execute(new TransactionCallbackWithException<GetUploadParamsResponse, MalformedURLException>() {

        @Override
        public GetUploadParamsResponse doInTransaction(TransactionStatus status) throws MalformedURLException {
            VolumeVO volume = persistVolume(owner, zoneId, volumeName, null, cmd.getFormat(), diskOfferingId, Volume.State.NotUploaded);
            VolumeInfo vol = volFactory.getVolume(volume.getId());
            RegisterVolumePayload payload = new RegisterVolumePayload(null, cmd.getChecksum(), cmd.getFormat());
            vol.addPayload(payload);

            Pair<EndPoint, DataObject> pair = volService.registerVolumeForPostUpload(vol, store);
            EndPoint ep = pair.first();
            DataObject dataObject = pair.second();

            GetUploadParamsResponse response = new GetUploadParamsResponse();
            String ssvmUrlDomain = _configDao.getValue(Config.SecStorageSecureCopyCert.key());
            String url = ImageStoreUtil.generatePostUploadUrl(ssvmUrlDomain, ep.getPublicAddr(), vol.getUuid());
            response.setPostURL(new URL(url));

            // Set the post URL; the monitoring thread uses it to determine the SSVM
            VolumeDataStoreVO volumeStore = _volumeStoreDao.findByVolume(vol.getId());
            assert (volumeStore != null) : "since volume is registered, volumeStore cannot be null at this stage";
            volumeStore.setExtractUrl(url);
            _volumeStoreDao.persist(volumeStore);

            response.setId(UUID.fromString(vol.getUuid()));

            int timeout = ImageStoreUploadMonitorImpl.getUploadOperationTimeout();
            DateTime currentDateTime = new DateTime(DateTimeZone.UTC);
            String expires = currentDateTime.plusMinutes(timeout).toString();
            response.setTimeout(expires);

            String key = _configDao.getValue(Config.SSVMPSK.key());

            // Encode the metadata using the post-upload config key
            TemplateOrVolumePostUploadCommand command = new TemplateOrVolumePostUploadCommand(vol.getId(), vol.getUuid(), volumeStore.getInstallPath(), cmd.getChecksum(), vol.getType().toString(), vol.getName(), vol.getFormat().toString(), dataObject.getDataStore().getUri(), dataObject.getDataStore().getRole().toString());
            command.setLocalPath(volumeStore.getLocalDownloadPath());
            // Use the existing max upload size configuration
            command.setMaxUploadSize(_configDao.getValue(Config.MaxUploadVolumeSize.key()));
            command.setDefaultMaxAccountSecondaryStorage(_configDao.getValue(Config.DefaultMaxAccountSecondaryStorage.key()));
            command.setAccountId(vol.getAccountId());
            Gson gson = new GsonBuilder().create();
            String metadata = EncryptionUtil.encodeData(gson.toJson(command), key);
            response.setMetadata(metadata);

            // Signature calculated over the metadata, URL, and expiry
            response.setSignature(EncryptionUtil.generateSignature(metadata + url + expires, key));
            return response;
        }
    });
}
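The response is made tamper-evident by signing the concatenation of metadata, URL, and expiry with the SSVM pre-shared key. A minimal sketch of that kind of keyed signature using the JDK's HmacSHA1 (the choice of MAC and the encoding are assumptions for illustration; EncryptionUtil.generateSignature in CloudStack is the authoritative implementation):

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class PostUploadSignatureSketch {
    // Assumed MAC algorithm; consult EncryptionUtil for what CloudStack actually uses.
    static String sign(String data, String presharedKey) throws Exception {
        Mac mac = Mac.getInstance("HmacSHA1");
        mac.init(new SecretKeySpec(presharedKey.getBytes(StandardCharsets.UTF_8), "HmacSHA1"));
        return Base64.getEncoder().encodeToString(mac.doFinal(data.getBytes(StandardCharsets.UTF_8)));
    }

    public static void main(String[] args) throws Exception {
        // Hypothetical values standing in for the real metadata, URL, and expiry.
        String metadata = "encrypted-command-blob";
        String url = "https://ssvm.example/upload/some-uuid";
        String expires = "2024-01-01T00:00:00.000Z";
        // Same ordering as uploadVolume(): metadata + url + expires
        System.out.println(sign(metadata + url + expires, "ssvm-psk"));
    }
}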
Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by apache.
The class VolumeApiServiceImpl, method orchestrateDetachVolumeFromVM:
private Volume orchestrateDetachVolumeFromVM(long vmId, long volumeId) {
    Volume volume = _volsDao.findById(volumeId);
    VMInstanceVO vm = _vmInstanceDao.findById(vmId);
    String errorMsg = "Failed to detach volume " + volume.getName() + " from VM " + vm.getHostName();
    boolean sendCommand = vm.getState() == State.Running;
    Long hostId = vm.getHostId();

    if (hostId == null) {
        hostId = vm.getLastHostId();
        HostVO host = _hostDao.findById(hostId);
        if (host != null && host.getHypervisorType() == HypervisorType.VMware) {
            sendCommand = true;
        }
    }

    HostVO host = null;
    StoragePoolVO volumePool = _storagePoolDao.findByIdIncludingRemoved(volume.getPoolId());

    if (hostId != null) {
        host = _hostDao.findById(hostId);
        if (host != null && host.getHypervisorType() == HypervisorType.XenServer && volumePool != null && volumePool.isManaged()) {
            sendCommand = true;
        }
    }

    Answer answer = null;

    if (sendCommand) {
        DataTO volTO = volFactory.getVolume(volume.getId()).getTO();
        DiskTO disk = new DiskTO(volTO, volume.getDeviceId(), volume.getPath(), volume.getVolumeType());
        DettachCommand cmd = new DettachCommand(disk, vm.getInstanceName());
        cmd.setManaged(volumePool.isManaged());
        cmd.setStorageHost(volumePool.getHostAddress());
        cmd.setStoragePort(volumePool.getPort());
        cmd.set_iScsiName(volume.get_iScsiName());

        try {
            answer = _agentMgr.send(hostId, cmd);
        } catch (Exception e) {
            throw new CloudRuntimeException(errorMsg + " due to: " + e.getMessage());
        }
    }

    if (!sendCommand || (answer != null && answer.getResult())) {
        // Mark the volume as detached
        _volsDao.detachVolume(volume.getId());
        // volume.getPoolId() should be null if the VM we are detaching the disk from has never been started before
        DataStore dataStore = volume.getPoolId() != null ? dataStoreMgr.getDataStore(volume.getPoolId(), DataStoreRole.Primary) : null;
        volService.revokeAccess(volFactory.getVolume(volume.getId()), host, dataStore);
        return _volsDao.findById(volumeId);
    } else {
        if (answer != null) {
            String details = answer.getDetails();
            if (details != null && !details.isEmpty()) {
                errorMsg += "; " + details;
            }
        }
        throw new CloudRuntimeException(errorMsg);
    }
}
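The decision of whether a DettachCommand must actually reach an agent boils down to three conditions: the VM is running, or it last ran on a VMware host, or it sits on XenServer with a managed pool. A simplified sketch of that predicate as a pure function (the enum and records are illustrative stand-ins for the CloudStack types, and the current-host/last-host distinction from the method above is collapsed here):

public class DetachDecisionSketch {
    enum Hypervisor { VMWARE, XENSERVER, KVM }

    // Illustrative stand-ins for VMInstanceVO / StoragePoolVO state.
    record VmState(boolean running, Hypervisor hostHypervisor) {}
    record PoolState(boolean managed) {}

    // Mirrors the sendCommand logic in orchestrateDetachVolumeFromVM, simplified.
    static boolean shouldSendCommand(VmState vm, PoolState pool) {
        if (vm.running()) {
            return true;                  // live detach always goes through the agent
        }
        if (vm.hostHypervisor() == Hypervisor.VMWARE) {
            return true;                  // VMware needs the command even when stopped
        }
        // XenServer with a managed pool also requires agent involvement
        return vm.hostHypervisor() == Hypervisor.XENSERVER && pool != null && pool.managed();
    }

    public static void main(String[] args) {
        System.out.println(shouldSendCommand(new VmState(false, Hypervisor.KVM), new PoolState(false)));    // false
        System.out.println(shouldSendCommand(new VmState(false, Hypervisor.VMWARE), new PoolState(false))); // true
    }
}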