Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by Apache.
The class StorageManagerImpl, method cleanupSecondaryStorage.
@Override
@DB
public void cleanupSecondaryStorage(boolean recurring) {
    // Here we no longer need to issue a DeleteCommand to the resource; we only need to remove the DB entry.
    try {
        // Cleanup templates in template_store_ref
        List<DataStore> imageStores = _dataStoreMgr.getImageStoresByScope(new ZoneScope(null));
        for (DataStore store : imageStores) {
            try {
                long storeId = store.getId();
                List<TemplateDataStoreVO> destroyedTemplateStoreVOs = _templateStoreDao.listDestroyed(storeId);
                s_logger.debug("Secondary storage garbage collector found " + destroyedTemplateStoreVOs.size() + " templates to cleanup on template_store_ref for store: " + store.getName());
                for (TemplateDataStoreVO destroyedTemplateStoreVO : destroyedTemplateStoreVOs) {
                    if (s_logger.isDebugEnabled()) {
                        s_logger.debug("Deleting template store DB entry: " + destroyedTemplateStoreVO);
                    }
                    _templateStoreDao.remove(destroyedTemplateStoreVO.getId());
                }
            } catch (Exception e) {
                s_logger.warn("problem cleaning up templates in template_store_ref for store: " + store.getName(), e);
            }
        }
        // Cleanup snapshots in snapshot_store_ref
        for (DataStore store : imageStores) {
            try {
                List<SnapshotDataStoreVO> destroyedSnapshotStoreVOs = _snapshotStoreDao.listDestroyed(store.getId());
                s_logger.debug("Secondary storage garbage collector found " + destroyedSnapshotStoreVOs.size() + " snapshots to cleanup on snapshot_store_ref for store: " + store.getName());
                for (SnapshotDataStoreVO destroyedSnapshotStoreVO : destroyedSnapshotStoreVOs) {
                    // check if this snapshot has a child
                    SnapshotInfo snap = snapshotFactory.getSnapshot(destroyedSnapshotStoreVO.getSnapshotId(), store);
                    if (snap.getChild() != null) {
                        s_logger.debug("Skip snapshot on store: " + destroyedSnapshotStoreVO + ", because it has a child");
                        continue;
                    }
                    if (s_logger.isDebugEnabled()) {
                        s_logger.debug("Deleting snapshot store DB entry: " + destroyedSnapshotStoreVO);
                    }
                    _snapshotDao.remove(destroyedSnapshotStoreVO.getSnapshotId());
                    SnapshotDataStoreVO snapshotOnPrimary = _snapshotStoreDao.findBySnapshot(destroyedSnapshotStoreVO.getSnapshotId(), DataStoreRole.Primary);
                    if (snapshotOnPrimary != null) {
                        _snapshotStoreDao.remove(snapshotOnPrimary.getId());
                    }
                    _snapshotStoreDao.remove(destroyedSnapshotStoreVO.getId());
                }
            } catch (Exception e2) {
                s_logger.warn("problem cleaning up snapshots in snapshot_store_ref for store: " + store.getName(), e2);
            }
        }
        // Cleanup volumes in volume_store_ref
        for (DataStore store : imageStores) {
            try {
                List<VolumeDataStoreVO> destroyedStoreVOs = _volumeStoreDao.listDestroyed(store.getId());
                s_logger.debug("Secondary storage garbage collector found " + destroyedStoreVOs.size() + " volumes to cleanup on volume_store_ref for store: " + store.getName());
                for (VolumeDataStoreVO destroyedStoreVO : destroyedStoreVOs) {
                    if (s_logger.isDebugEnabled()) {
                        s_logger.debug("Deleting volume store DB entry: " + destroyedStoreVO);
                    }
                    _volumeStoreDao.remove(destroyedStoreVO.getId());
                }
            } catch (Exception e2) {
                s_logger.warn("problem cleaning up volumes in volume_store_ref for store: " + store.getName(), e2);
            }
        }
    } catch (Exception e3) {
        s_logger.warn("problem cleaning up secondary storage DB entries. ", e3);
    }
}
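In StorageManagerImpl this kind of cleanup is typically driven by a recurring garbage-collection task rather than called directly. The following is only a minimal scheduling sketch: the executor field, method name, and interval parameter are illustrative assumptions, not the project's actual wiring.

// Hypothetical scheduling sketch: drive the secondary-storage garbage collector on a fixed interval.
// Assumes java.util.concurrent.{Executors, ScheduledExecutorService, TimeUnit} are imported.
private final ScheduledExecutorService _gcExecutor = Executors.newSingleThreadScheduledExecutor();

private void scheduleSecondaryStorageCleanup(final long intervalSeconds) {
    _gcExecutor.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            try {
                // recurring == true marks this as a periodic run
                cleanupSecondaryStorage(true);
            } catch (Exception e) {
                s_logger.warn("Secondary storage cleanup run failed", e);
            }
        }
    }, intervalSeconds, intervalSeconds, TimeUnit.SECONDS);
}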
Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by Apache.
The class StorageManagerImpl, method createLocalStorage.
@DB
@Override
public DataStore createLocalStorage(Host host, StoragePoolInfo pInfo) throws ConnectionException {
    DataCenterVO dc = _dcDao.findById(host.getDataCenterId());
    if (dc == null) {
        return null;
    }
    boolean useLocalStorageForSystemVM = false;
    Boolean isLocal = ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dc.getId());
    if (isLocal != null) {
        useLocalStorageForSystemVM = isLocal.booleanValue();
    }
    if (!(dc.isLocalStorageEnabled() || useLocalStorageForSystemVM)) {
        return null;
    }
    DataStore store;
    try {
        String hostAddress = pInfo.getHost();
        if (host.getHypervisorType() == Hypervisor.HypervisorType.VMware) {
            hostAddress = "VMFS datastore: " + pInfo.getHostPath();
        }
        StoragePoolVO pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), hostAddress, pInfo.getHostPath(), pInfo.getUuid());
        if (pool == null && host.getHypervisorType() == HypervisorType.VMware) {
            // need to perform runtime upgrade here
            if (pInfo.getHostPath().length() > 0) {
                pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), hostAddress, "", pInfo.getUuid());
            }
        }
        if (pool == null) {
            // the path can be different, but if they have the same uuid, assume they are the same storage
            pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), hostAddress, null, pInfo.getUuid());
            if (pool != null) {
                s_logger.debug("Found a storage pool: " + pInfo.getUuid() + ", but with different hostpath " + pInfo.getHostPath() + ", still treat it as the same pool");
            }
        }
        DataStoreProvider provider = _dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider();
        DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle();
        if (pool == null) {
            Map<String, Object> params = new HashMap<String, Object>();
            String name = (host.getName() + " Local Storage");
            params.put("zoneId", host.getDataCenterId());
            params.put("clusterId", host.getClusterId());
            params.put("podId", host.getPodId());
            params.put("url", pInfo.getPoolType().toString() + "://" + pInfo.getHost() + "/" + pInfo.getHostPath());
            params.put("name", name);
            params.put("localStorage", true);
            params.put("details", pInfo.getDetails());
            params.put("uuid", pInfo.getUuid());
            params.put("providerName", provider.getName());
            store = lifeCycle.initialize(params);
        } else {
            store = _dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
        }
        pool = _storagePoolDao.findById(store.getId());
        if (pool.getStatus() != StoragePoolStatus.Maintenance && pool.getStatus() != StoragePoolStatus.Removed) {
            HostScope scope = new HostScope(host.getId(), host.getClusterId(), host.getDataCenterId());
            lifeCycle.attachHost(store, scope, pInfo);
        }
    } catch (Exception e) {
        s_logger.warn("Unable to setup the local storage pool for " + host, e);
        throw new ConnectionException(true, "Unable to setup the local storage pool for " + host, e);
    }
    return _dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary);
}
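The DataStore-relevant hand-off here is to DataStoreLifeCycle: a previously unknown local pool is materialized through lifeCycle.initialize(params) and then attached to the reporting host with a HostScope. A caller would typically invoke this from the agent-connection path when a host reports its local pool; the sketch below is hedged, with the surrounding listener method and the _storageManager field being assumptions rather than the project's actual wiring.

// Hypothetical caller sketch: react to a host reporting a local storage pool at connect time.
private void onHostConnected(Host host, StoragePoolInfo reportedLocalPool) throws ConnectionException {
    DataStore localStore = _storageManager.createLocalStorage(host, reportedLocalPool);
    if (localStore == null) {
        // Either the zone was not found or local storage is disabled for this zone.
        s_logger.debug("Local storage not created for host " + host.getId());
        return;
    }
    s_logger.debug("Local primary data store " + localStore.getId() + " is ready for host " + host.getId());
}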
Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by Apache.
The class SnapshotManagerImpl, method deleteSnapshotDirsForAccount.
@Override
public boolean deleteSnapshotDirsForAccount(long accountId) {
    List<VolumeVO> volumes = _volsDao.findByAccount(accountId);
    // The above call will list only non-destroyed volumes.
    // So call this method before marking the volumes as destroyed,
    // i.e. before the VMs for those volumes are destroyed.
    boolean success = true;
    for (VolumeVO volume : volumes) {
        if (volume.getPoolId() == null) {
            continue;
        }
        Long volumeId = volume.getId();
        Long dcId = volume.getDataCenterId();
        if (_snapshotDao.listByVolumeIdIncludingRemoved(volumeId).isEmpty()) {
            // This volume doesn't have any snapshots. Nothing to delete.
            continue;
        }
        List<DataStore> ssHosts = dataStoreMgr.getImageStoresByScope(new ZoneScope(dcId));
        for (DataStore ssHost : ssHosts) {
            String snapshotDir = TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR + "/" + accountId + "/" + volumeId;
            DeleteSnapshotsDirCommand cmd = new DeleteSnapshotsDirCommand(ssHost.getTO(), snapshotDir);
            EndPoint ep = _epSelector.select(ssHost);
            Answer answer = null;
            if (ep == null) {
                String errMsg = "No remote endpoint to send command to; check if the host or SSVM is down";
                s_logger.error(errMsg);
                answer = new Answer(cmd, false, errMsg);
            } else {
                answer = ep.sendMessage(cmd);
            }
            if ((answer != null) && answer.getResult()) {
                s_logger.debug("Deleted all snapshots for volume: " + volumeId + " under account: " + accountId);
            } else {
                success = false;
                if (answer != null) {
                    s_logger.warn("Failed to delete all snapshots for volume " + volumeId + " on secondary storage " + ssHost.getUri());
                    s_logger.error(answer.getDetails());
                }
            }
        }
        // Either way, delete the snapshot records for this volume.
        List<SnapshotVO> snapshots = listSnapsforVolume(volumeId);
        for (SnapshotVO snapshot : snapshots) {
            SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.DELETE);
            if (snapshotStrategy == null) {
                s_logger.error("Unable to find snapshot strategy to handle snapshot with id '" + snapshot.getId() + "'");
                continue;
            }
            SnapshotDataStoreVO snapshotStoreRef = _snapshotStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Image);
            if (snapshotStrategy.deleteSnapshot(snapshot.getId())) {
                if (Type.MANUAL == snapshot.getRecurringType()) {
                    _resourceLimitMgr.decrementResourceCount(accountId, ResourceType.snapshot);
                    if (snapshotStoreRef != null) {
                        _resourceLimitMgr.decrementResourceCount(accountId, ResourceType.secondary_storage, new Long(snapshotStoreRef.getPhysicalSize()));
                    }
                }
                // Log the usage event after successful deletion
                UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_DELETE, snapshot.getAccountId(), volume.getDataCenterId(), snapshot.getId(), snapshot.getName(), null, null, volume.getSize(), snapshot.getClass().getName(), snapshot.getUuid());
            }
        }
    }
    // Returns true only if the snapshots directory has been deleted for all volumes.
    return success;
}
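The ordering constraint in the comments matters: _volsDao.findByAccount only returns non-destroyed volumes, so this method must run before the account's volumes are marked destroyed. A minimal ordering sketch follows; the wrapper method, the _snapshotMgr field, and the destroyVolume helper are hypothetical names used only to illustrate the sequence.

// Hypothetical account-cleanup ordering sketch.
private boolean cleanupAccountStorage(long accountId) {
    // 1. Remove snapshot directories while the volumes are still listed as non-destroyed.
    boolean snapshotDirsDeleted = _snapshotMgr.deleteSnapshotDirsForAccount(accountId);
    // 2. Only afterwards destroy the account's volumes (and the VMs that own them).
    for (VolumeVO volume : _volsDao.findByAccount(accountId)) {
        destroyVolume(volume); // hypothetical helper
    }
    return snapshotDirsDeleted;
}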
Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by Apache.
The class VolumeApiServiceImpl, method sendAttachVolumeCommand.
private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volumeToAttach, Long deviceId) {
    String errorMsg = "Failed to attach volume " + volumeToAttach.getName() + " to VM " + vm.getHostName();
    boolean sendCommand = vm.getState() == State.Running;
    AttachAnswer answer = null;
    Long hostId = vm.getHostId();
    if (hostId == null) {
        hostId = vm.getLastHostId();
        HostVO host = _hostDao.findById(hostId);
        if (host != null && host.getHypervisorType() == HypervisorType.VMware) {
            sendCommand = true;
        }
    }
    HostVO host = null;
    StoragePoolVO volumeToAttachStoragePool = _storagePoolDao.findById(volumeToAttach.getPoolId());
    if (hostId != null) {
        host = _hostDao.findById(hostId);
        if (host != null && host.getHypervisorType() == HypervisorType.XenServer && volumeToAttachStoragePool != null && volumeToAttachStoragePool.isManaged()) {
            sendCommand = true;
        }
    }
    // volumeToAttachStoragePool should be null if the VM we are attaching the disk to has never been started before
    DataStore dataStore = volumeToAttachStoragePool != null ? dataStoreMgr.getDataStore(volumeToAttachStoragePool.getId(), DataStoreRole.Primary) : null;
    // if we don't have a host, the VM we are attaching the disk to has never been started before
    if (host != null) {
        try {
            volService.grantAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore);
        } catch (Exception e) {
            volService.revokeAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore);
            throw new CloudRuntimeException(e.getMessage());
        }
    }
    if (sendCommand) {
        if (host != null && host.getHypervisorType() == HypervisorType.KVM && volumeToAttachStoragePool.isManaged() && volumeToAttach.getPath() == null) {
            volumeToAttach.setPath(volumeToAttach.get_iScsiName());
            _volsDao.update(volumeToAttach.getId(), volumeToAttach);
        }
        DataTO volTO = volFactory.getVolume(volumeToAttach.getId()).getTO();
        deviceId = getDeviceId(vm, deviceId);
        DiskTO disk = storageMgr.getDiskWithThrottling(volTO, volumeToAttach.getVolumeType(), deviceId, volumeToAttach.getPath(), vm.getServiceOfferingId(), volumeToAttach.getDiskOfferingId());
        AttachCommand cmd = new AttachCommand(disk, vm.getInstanceName());
        ChapInfo chapInfo = volService.getChapInfo(volFactory.getVolume(volumeToAttach.getId()), dataStore);
        Map<String, String> details = new HashMap<String, String>();
        disk.setDetails(details);
        details.put(DiskTO.MANAGED, String.valueOf(volumeToAttachStoragePool.isManaged()));
        details.put(DiskTO.STORAGE_HOST, volumeToAttachStoragePool.getHostAddress());
        details.put(DiskTO.STORAGE_PORT, String.valueOf(volumeToAttachStoragePool.getPort()));
        details.put(DiskTO.VOLUME_SIZE, String.valueOf(volumeToAttach.getSize()));
        details.put(DiskTO.IQN, volumeToAttach.get_iScsiName());
        details.put(DiskTO.MOUNT_POINT, volumeToAttach.get_iScsiName());
        details.put(DiskTO.PROTOCOL_TYPE, (volumeToAttach.getPoolType() != null) ? volumeToAttach.getPoolType().toString() : null);
        if (chapInfo != null) {
            details.put(DiskTO.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername());
            details.put(DiskTO.CHAP_INITIATOR_SECRET, chapInfo.getInitiatorSecret());
            details.put(DiskTO.CHAP_TARGET_USERNAME, chapInfo.getTargetUsername());
            details.put(DiskTO.CHAP_TARGET_SECRET, chapInfo.getTargetSecret());
        }
        _userVmDao.loadDetails(vm);
        Map<String, String> controllerInfo = new HashMap<String, String>();
        controllerInfo.put(VmDetailConstants.ROOT_DISK_CONTROLLER, vm.getDetail(VmDetailConstants.ROOT_DISK_CONTROLLER));
        controllerInfo.put(VmDetailConstants.DATA_DISK_CONTROLLER, vm.getDetail(VmDetailConstants.DATA_DISK_CONTROLLER));
        cmd.setControllerInfo(controllerInfo);
        s_logger.debug("Attach volume id:" + volumeToAttach.getId() + " on VM id:" + vm.getId() + " has controller info:" + controllerInfo);
        try {
            answer = (AttachAnswer) _agentMgr.send(hostId, cmd);
        } catch (Exception e) {
            if (host != null) {
                volService.revokeAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore);
            }
            throw new CloudRuntimeException(errorMsg + " due to: " + e.getMessage());
        }
    }
    if (!sendCommand || (answer != null && answer.getResult())) {
        // Mark the volume as attached
        if (sendCommand) {
            DiskTO disk = answer.getDisk();
            _volsDao.attachVolume(volumeToAttach.getId(), vm.getId(), disk.getDiskSeq());
            volumeToAttach = _volsDao.findById(volumeToAttach.getId());
            if (volumeToAttachStoragePool.isManaged() && volumeToAttach.getPath() == null) {
                volumeToAttach.setPath(answer.getDisk().getPath());
                _volsDao.update(volumeToAttach.getId(), volumeToAttach);
            }
        } else {
            deviceId = getDeviceId(vm, deviceId);
            _volsDao.attachVolume(volumeToAttach.getId(), vm.getId(), deviceId);
        }
        // insert record for disk I/O statistics
        VmDiskStatisticsVO diskstats = _vmDiskStatsDao.findBy(vm.getAccountId(), vm.getDataCenterId(), vm.getId(), volumeToAttach.getId());
        if (diskstats == null) {
            diskstats = new VmDiskStatisticsVO(vm.getAccountId(), vm.getDataCenterId(), vm.getId(), volumeToAttach.getId());
            _vmDiskStatsDao.persist(diskstats);
        }
        return _volsDao.findById(volumeToAttach.getId());
    } else {
        if (answer != null) {
            String details = answer.getDetails();
            if (details != null && !details.isEmpty()) {
                errorMsg += "; " + details;
            }
        }
        if (host != null) {
            volService.revokeAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore);
        }
        throw new CloudRuntimeException(errorMsg);
    }
}
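The central DataStore pattern in this method is access control around the agent command: access to the volume on its primary DataStore is granted to the host before the AttachCommand is dispatched and revoked whenever the attach fails. Below is a condensed sketch of that grant/send/revoke-on-failure pattern distilled from the method above; the helper name and the simplified error handling are assumptions.

// Condensed sketch of the grant/send/revoke-on-failure pattern used above.
private void attachWithAccessControl(VolumeInfo volume, HostVO host, DataStore primaryStore, AttachCommand cmd) {
    // Grant the host access to the volume on its primary data store before dispatching.
    volService.grantAccess(volume, host, primaryStore);
    AttachAnswer answer;
    try {
        answer = (AttachAnswer) _agentMgr.send(host.getId(), cmd);
    } catch (Exception e) {
        // On any send failure, take the access grant back before surfacing the error.
        volService.revokeAccess(volume, host, primaryStore);
        throw new CloudRuntimeException("Failed to attach volume " + volume.getId() + " due to: " + e.getMessage());
    }
    if (answer == null || !answer.getResult()) {
        volService.revokeAccess(volume, host, primaryStore);
        throw new CloudRuntimeException("Failed to attach volume " + volume.getId());
    }
}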
Use of org.apache.cloudstack.engine.subsystem.api.storage.DataStore in project cloudstack by Apache.
The class TemplateManagerImpl, method createPrivateTemplate.
@Override
@DB
@ActionEvent(eventType = EventTypes.EVENT_TEMPLATE_CREATE, eventDescription = "creating template", async = true)
public VirtualMachineTemplate createPrivateTemplate(CreateTemplateCmd command) throws CloudRuntimeException {
    final long templateId = command.getEntityId();
    Long volumeId = command.getVolumeId();
    Long snapshotId = command.getSnapshotId();
    VMTemplateVO privateTemplate = null;
    final Long accountId = CallContext.current().getCallingAccountId();
    SnapshotVO snapshot = null;
    VolumeVO volume = null;
    try {
        TemplateInfo tmplInfo = _tmplFactory.getTemplate(templateId, DataStoreRole.Image);
        long zoneId = 0;
        if (snapshotId != null) {
            snapshot = _snapshotDao.findById(snapshotId);
            zoneId = snapshot.getDataCenterId();
        } else if (volumeId != null) {
            volume = _volumeDao.findById(volumeId);
            zoneId = volume.getDataCenterId();
        }
        DataStore store = _dataStoreMgr.getImageStore(zoneId);
        if (store == null) {
            throw new CloudRuntimeException("cannot find an image store for zone " + zoneId);
        }
        AsyncCallFuture<TemplateApiResult> future = null;
        if (snapshotId != null) {
            DataStoreRole dataStoreRole = ApiResponseHelper.getDataStoreRole(snapshot, _snapshotStoreDao, _dataStoreMgr);
            SnapshotInfo snapInfo = _snapshotFactory.getSnapshot(snapshotId, dataStoreRole);
            if (dataStoreRole == DataStoreRole.Image) {
                if (snapInfo == null) {
                    snapInfo = _snapshotFactory.getSnapshot(snapshotId, DataStoreRole.Primary);
                    if (snapInfo == null) {
                        throw new CloudRuntimeException("Cannot find snapshot " + snapshotId);
                    }
                    // We need to copy the snapshot onto secondary storage.
                    SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.BACKUP);
                    snapshotStrategy.backupSnapshot(snapInfo);
                    // Attempt to grab it again.
                    snapInfo = _snapshotFactory.getSnapshot(snapshotId, dataStoreRole);
                    if (snapInfo == null) {
                        throw new CloudRuntimeException("Cannot find snapshot " + snapshotId + " on secondary and could not create backup");
                    }
                }
                DataStore snapStore = snapInfo.getDataStore();
                if (snapStore != null) {
                    // pick the snapshot's image store to create the template on
                    store = snapStore;
                }
            }
            future = _tmpltSvr.createTemplateFromSnapshotAsync(snapInfo, tmplInfo, store);
        } else if (volumeId != null) {
            VolumeInfo volInfo = _volFactory.getVolume(volumeId);
            future = _tmpltSvr.createTemplateFromVolumeAsync(volInfo, tmplInfo, store);
        } else {
            throw new CloudRuntimeException("Creating a private template requires either a snapshotId or a volumeId");
        }
        CommandResult result = null;
        try {
            result = future.get();
            if (result.isFailed()) {
                privateTemplate = null;
                s_logger.debug("Failed to create template: " + result.getResult());
                throw new CloudRuntimeException("Failed to create template: " + result.getResult());
            }
            // create entries in the template_zone_ref table
            if (_dataStoreMgr.isRegionStore(store)) {
                // template created on a region store
                _tmpltSvr.associateTemplateToZone(templateId, null);
            } else {
                VMTemplateZoneVO templateZone = new VMTemplateZoneVO(zoneId, templateId, new Date());
                _tmpltZoneDao.persist(templateZone);
            }
            privateTemplate = _tmpltDao.findById(templateId);
            TemplateDataStoreVO srcTmpltStore = _tmplStoreDao.findByStoreTemplate(store.getId(), templateId);
            UsageEventVO usageEvent = new UsageEventVO(EventTypes.EVENT_TEMPLATE_CREATE, privateTemplate.getAccountId(), zoneId, privateTemplate.getId(), privateTemplate.getName(), null, privateTemplate.getSourceTemplateId(), srcTmpltStore.getPhysicalSize(), privateTemplate.getSize());
            _usageEventDao.persist(usageEvent);
        } catch (InterruptedException e) {
            s_logger.debug("Failed to create template", e);
            throw new CloudRuntimeException("Failed to create template", e);
        } catch (ExecutionException e) {
            s_logger.debug("Failed to create template", e);
            throw new CloudRuntimeException("Failed to create template", e);
        }
    } finally {
        if (privateTemplate == null) {
            final VolumeVO volumeFinal = volume;
            final SnapshotVO snapshotFinal = snapshot;
            Transaction.execute(new TransactionCallbackNoReturn() {

                @Override
                public void doInTransactionWithoutResult(TransactionStatus status) {
                    // template_store_ref entries should already have been removed via
                    // DataObject.processEvent on failure, but clean them up here to avoid
                    // leftovers that would make removing the template from the vm_template table fail.
                    _tmplStoreDao.deletePrimaryRecordsForTemplate(templateId);
                    // Remove the template_zone_ref record
                    _tmpltZoneDao.deletePrimaryRecordsForTemplate(templateId);
                    // Remove the template record
                    _tmpltDao.expunge(templateId);
                    // decrement resource count
                    if (accountId != null) {
                        _resourceLimitMgr.decrementResourceCount(accountId, ResourceType.template);
                        _resourceLimitMgr.decrementResourceCount(accountId, ResourceType.secondary_storage, new Long(volumeFinal != null ? volumeFinal.getSize() : snapshotFinal.getSize()));
                    }
                }
            });
        }
    }
    if (privateTemplate != null) {
        return privateTemplate;
    } else {
        throw new CloudRuntimeException("Failed to create a template");
    }
}
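The method resolves an image DataStore via _dataStoreMgr.getImageStore(zoneId), optionally switches to the snapshot's own image store, and then blocks on an AsyncCallFuture<TemplateApiResult>. A minimal sketch of that submit-and-wait pattern, distilled from the code above with illustrative local variable names:

// Minimal sketch of the async template-service pattern used above: submit the copy,
// block on the future, and treat a failed result as an error.
AsyncCallFuture<TemplateApiResult> future = _tmpltSvr.createTemplateFromVolumeAsync(volInfo, tmplInfo, store);
try {
    TemplateApiResult result = future.get();
    if (result.isFailed()) {
        throw new CloudRuntimeException("Failed to create template: " + result.getResult());
    }
} catch (InterruptedException | ExecutionException e) {
    throw new CloudRuntimeException("Failed to create template", e);
}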