Use of org.apache.cloudstack.storage.to.VolumeObjectTO in project cloudstack by apache.
The class VmwareStorageProcessor, method createVolumeFromSnapshot:
@Override
public Answer createVolumeFromSnapshot(CopyCommand cmd) {
    DataTO srcData = cmd.getSrcTO();
    SnapshotObjectTO snapshot = (SnapshotObjectTO) srcData;
    DataTO destData = cmd.getDestTO();
    DataStoreTO pool = destData.getDataStore();
    DataStoreTO imageStore = srcData.getDataStore();
    if (!(imageStore instanceof NfsTO)) {
        return new CopyCmdAnswer("unsupported protocol");
    }
    NfsTO nfsImageStore = (NfsTO) imageStore;
    String primaryStorageNameLabel = pool.getUuid();
    String secondaryStorageUrl = nfsImageStore.getUrl();
    String backedUpSnapshotUuid = snapshot.getPath();
    int index = backedUpSnapshotUuid.lastIndexOf(File.separator);
    String backupPath = backedUpSnapshotUuid.substring(0, index);
    backedUpSnapshotUuid = backedUpSnapshotUuid.substring(index + 1);
    String details = null;
    String newVolumeName = VmwareHelper.getVCenterSafeUuid();
    VmwareContext context = hostService.getServiceContext(cmd);
    try {
        VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd);
        ManagedObjectReference morPrimaryDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, primaryStorageNameLabel);
        if (morPrimaryDs == null) {
            String msg = "Unable to find datastore: " + primaryStorageNameLabel;
            s_logger.error(msg);
            throw new Exception(msg);
        }
        // strip off the extension, since restoreVolumeFromSecStorage appends the suffix internally
        if (backedUpSnapshotUuid.endsWith(".ova")) {
            backedUpSnapshotUuid = backedUpSnapshotUuid.replace(".ova", "");
        } else if (backedUpSnapshotUuid.endsWith(".ovf")) {
            backedUpSnapshotUuid = backedUpSnapshotUuid.replace(".ovf", "");
        }
        DatastoreMO primaryDsMo = new DatastoreMO(hyperHost.getContext(), morPrimaryDs);
        restoreVolumeFromSecStorage(hyperHost, primaryDsMo, newVolumeName, secondaryStorageUrl, backupPath, backedUpSnapshotUuid, (long) cmd.getWait() * 1000, _nfsVersion);
        VolumeObjectTO newVol = new VolumeObjectTO();
        newVol.setPath(newVolumeName);
        return new CopyCmdAnswer(newVol);
    } catch (Throwable e) {
        if (e instanceof RemoteException) {
            hostService.invalidateServiceContext(context);
        }
        s_logger.error("Unexpected exception ", e);
        details = "create volume from snapshot exception: " + VmwareHelper.getExceptionMessage(e);
    }
    return new CopyCmdAnswer(details);
}
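The method only fills in the path of the returned VolumeObjectTO; whoever dispatched the CopyCommand is expected to unpack it from the answer. A minimal caller-side sketch, assuming only the accessors already used in these snippets (getResult, getNewData, getDetails) and a processor reference like the handler's field shown further below:

    // Illustrative only: unpack the VolumeObjectTO carried by the CopyCmdAnswer.
    Answer answer = processor.createVolumeFromSnapshot(cmd);
    if (answer.getResult() && answer instanceof CopyCmdAnswer) {
        VolumeObjectTO restored = (VolumeObjectTO) ((CopyCmdAnswer) answer).getNewData();
        s_logger.info("Volume restored from snapshot at datastore path " + restored.getPath());
    } else {
        s_logger.warn("createVolumeFromSnapshot failed: " + answer.getDetails());
    }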
Use of org.apache.cloudstack.storage.to.VolumeObjectTO in project cloudstack by apache.
The class VmwareStorageSubsystemCommandHandler, method execute:
@Override
protected Answer execute(CopyCommand cmd) {
    DataTO srcData = cmd.getSrcTO();
    DataTO destData = cmd.getDestTO();
    DataStoreTO srcDataStore = srcData.getDataStore();
    DataStoreTO destDataStore = destData.getDataStore();
    // if copied between S3 and the NFS cache, delegate to the resource
    boolean needDelegation = false;
    if (destDataStore instanceof NfsTO && destDataStore.getRole() == DataStoreRole.ImageCache) {
        if (srcDataStore instanceof S3TO || srcDataStore instanceof SwiftTO) {
            needDelegation = true;
        }
    }
    if (srcDataStore.getRole() == DataStoreRole.ImageCache && destDataStore.getRole() == DataStoreRole.Image) {
        // VMware needs extra processing, such as packing into an OVA, before sending to S3
        if (srcData.getObjectType() == DataObjectType.VOLUME) {
            NfsTO cacheStore = (NfsTO) srcDataStore;
            String parentPath = storageResource.getRootDir(cacheStore.getUrl(), _nfsVersion);
            VolumeObjectTO vol = (VolumeObjectTO) srcData;
            String path = vol.getPath();
            int index = path.lastIndexOf(File.separator);
            String name = path.substring(index + 1);
            storageManager.createOva(parentPath + File.separator + path, name);
            vol.setPath(path + File.separator + name + ".ova");
        } else if (srcData.getObjectType() == DataObjectType.TEMPLATE) {
            // sync the template from the NFS cache to S3 in the NFS-to-S3 migration case
            storageManager.createOvaForTemplate((TemplateObjectTO) srcData);
        } else if (srcData.getObjectType() == DataObjectType.SNAPSHOT) {
            // pack the OVA first,
            // then sync the snapshot from the NFS cache to S3 in the NFS-to-S3 migration case
            String parentPath = storageResource.getRootDir(srcDataStore.getUrl(), _nfsVersion);
            SnapshotObjectTO snap = (SnapshotObjectTO) srcData;
            String path = snap.getPath();
            int index = path.lastIndexOf(File.separator);
            String name = path.substring(index + 1);
            String snapDir = path.substring(0, index);
            storageManager.createOva(parentPath + File.separator + snapDir, name);
            if (destData.getObjectType() == DataObjectType.TEMPLATE) {
                // create a template from the snapshot on the source first, then copy it to S3
                TemplateObjectTO cacheTemplate = (TemplateObjectTO) destData;
                cacheTemplate.setDataStore(srcDataStore);
                CopyCmdAnswer answer = (CopyCmdAnswer) processor.createTemplateFromSnapshot(cmd);
                if (!answer.getResult()) {
                    return answer;
                }
                cacheTemplate.setDataStore(destDataStore);
                TemplateObjectTO template = (TemplateObjectTO) answer.getNewData();
                template.setDataStore(srcDataStore);
                CopyCommand newCmd = new CopyCommand(template, destData, cmd.getWait(), cmd.executeInSequence());
                Answer result = storageResource.defaultAction(newCmd);
                // clean up template data on the staging area
                try {
                    DeleteCommand deleteCommand = new DeleteCommand(template);
                    storageResource.defaultAction(deleteCommand);
                } catch (Exception e) {
                    s_logger.debug("Failed to clean up staging area:", e);
                }
                return result;
            }
        }
        needDelegation = true;
    }
    if (srcData.getObjectType() == DataObjectType.SNAPSHOT && srcData.getDataStore().getRole() == DataStoreRole.Primary) {
        // to back up a snapshot, back up to the cache first, then to the object store if one is used
        if (cmd.getCacheTO() != null) {
            cmd.setDestTO(cmd.getCacheTO());
            CopyCmdAnswer answer = (CopyCmdAnswer) processor.backupSnapshot(cmd);
            if (!answer.getResult()) {
                return answer;
            }
            NfsTO cacheStore = (NfsTO) cmd.getCacheTO().getDataStore();
            String parentPath = storageResource.getRootDir(cacheStore.getUrl(), _nfsVersion);
            SnapshotObjectTO newSnapshot = (SnapshotObjectTO) answer.getNewData();
            String path = newSnapshot.getPath();
            int index = path.lastIndexOf(File.separator);
            String name = path.substring(index + 1);
            String dir = path.substring(0, index);
            storageManager.createOva(parentPath + File.separator + dir, name);
            newSnapshot.setPath(newSnapshot.getPath() + ".ova");
            newSnapshot.setDataStore(cmd.getCacheTO().getDataStore());
            CopyCommand newCmd = new CopyCommand(newSnapshot, destData, cmd.getWait(), cmd.executeInSequence());
            Answer result = storageResource.defaultAction(newCmd);
            // clean up data on the staging area
            try {
                newSnapshot.setPath(path);
                DeleteCommand deleteCommand = new DeleteCommand(newSnapshot);
                storageResource.defaultAction(deleteCommand);
            } catch (Exception e) {
                s_logger.debug("Failed to clean up staging area:", e);
            }
            return result;
        }
    }
    if (needDelegation) {
        return storageResource.defaultAction(cmd);
    } else {
        return super.execute(cmd);
    }
}
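The delegation choice at the top of this handler can be read as a single predicate: copies from an object store (S3 or Swift) into the NFS image cache go straight to the resource. A sketch of that check as a standalone helper, with a hypothetical name and using only the types already shown above:

    // Illustrative restatement of the delegation test; not part of the actual class.
    private static boolean isObjectStoreToNfsCacheCopy(DataStoreTO src, DataStoreTO dest) {
        boolean destIsNfsCache = dest instanceof NfsTO && dest.getRole() == DataStoreRole.ImageCache;
        boolean srcIsObjectStore = src instanceof S3TO || src instanceof SwiftTO;
        return destIsNfsCache && srcIsObjectStore;
    }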
Use of org.apache.cloudstack.storage.to.VolumeObjectTO in project cloudstack by apache.
The class VmwareStorageProcessor, method copyVolumeFromImageCacheToPrimary:
@Override
public Answer copyVolumeFromImageCacheToPrimary(CopyCommand cmd) {
    VolumeObjectTO srcVolume = (VolumeObjectTO) cmd.getSrcTO();
    VolumeObjectTO destVolume = (VolumeObjectTO) cmd.getDestTO();
    VmwareContext context = hostService.getServiceContext(cmd);
    try {
        NfsTO srcStore = (NfsTO) srcVolume.getDataStore();
        DataStoreTO destStore = destVolume.getDataStore();
        VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd);
        String uuid = destStore.getUuid();
        ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, uuid);
        if (morDatastore == null) {
            URI uri = new URI(destStore.getUrl());
            morDatastore = hyperHost.mountDatastore(false, uri.getHost(), 0, uri.getPath(), destStore.getUuid().replace("-", ""));
            if (morDatastore == null) {
                throw new Exception("Unable to mount storage pool on host. storeUrl: " + uri.getHost() + ":/" + uri.getPath());
            }
        }
        Pair<String, String> result = copyVolumeFromSecStorage(hyperHost, srcVolume.getPath(), new DatastoreMO(context, morDatastore), srcStore.getUrl(), (long) cmd.getWait() * 1000, _nfsVersion);
        deleteVolumeDirOnSecondaryStorage(result.first(), srcStore.getUrl(), _nfsVersion);
        VolumeObjectTO newVolume = new VolumeObjectTO();
        newVolume.setPath(result.second());
        return new CopyCmdAnswer(newVolume);
    } catch (Throwable t) {
        if (t instanceof RemoteException) {
            hostService.invalidateServiceContext(context);
        }
        String msg = "Unable to execute CopyVolumeCommand due to exception";
        s_logger.error(msg, t);
        return new CopyCmdAnswer("copy volume secondary to primary failed due to exception: " + VmwareHelper.getExceptionMessage(t));
    }
}
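Much of the body above is a find-or-mount fallback: look up the destination datastore by UUID and, if vCenter does not know it yet, mount it from the store URL. A sketch of that step factored into a helper (the helper name is invented; the calls are the same ones used above):

    // Illustrative helper: find the datastore by UUID, or mount it from the NFS URL.
    private ManagedObjectReference findOrMountDatastore(VmwareHypervisorHost hyperHost, DataStoreTO destStore) throws Exception {
        ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, destStore.getUuid());
        if (morDatastore == null) {
            URI uri = new URI(destStore.getUrl());
            // the datastore is registered under the pool UUID with the dashes stripped
            morDatastore = hyperHost.mountDatastore(false, uri.getHost(), 0, uri.getPath(), destStore.getUuid().replace("-", ""));
        }
        if (morDatastore == null) {
            throw new Exception("Unable to mount storage pool on host. storeUrl: " + destStore.getUrl());
        }
        return morDatastore;
    }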
Use of org.apache.cloudstack.storage.to.VolumeObjectTO in project cloudstack by apache.
The class LibvirtComputingResource, method createVbd:
public void createVbd(final Connect conn, final VirtualMachineTO vmSpec, final String vmName, final LibvirtVMDef vm) throws InternalErrorException, LibvirtException, URISyntaxException {
    final List<DiskTO> disks = Arrays.asList(vmSpec.getDisks());
    Collections.sort(disks, new Comparator<DiskTO>() {

        @Override
        public int compare(final DiskTO arg0, final DiskTO arg1) {
            return arg0.getDiskSeq() > arg1.getDiskSeq() ? 1 : -1;
        }
    });
    for (final DiskTO volume : disks) {
        KVMPhysicalDisk physicalDisk = null;
        KVMStoragePool pool = null;
        final DataTO data = volume.getData();
        if (volume.getType() == Volume.Type.ISO && data.getPath() != null) {
            final NfsTO nfsStore = (NfsTO) data.getDataStore();
            final String volPath = nfsStore.getUrl() + File.separator + data.getPath();
            final int index = volPath.lastIndexOf("/");
            final String volDir = volPath.substring(0, index);
            final String volName = volPath.substring(index + 1);
            final KVMStoragePool secondaryStorage = _storagePoolMgr.getStoragePoolByURI(volDir);
            physicalDisk = secondaryStorage.getPhysicalDisk(volName);
        } else if (volume.getType() != Volume.Type.ISO) {
            final PrimaryDataStoreTO store = (PrimaryDataStoreTO) data.getDataStore();
            physicalDisk = _storagePoolMgr.getPhysicalDisk(store.getPoolType(), store.getUuid(), data.getPath());
            pool = physicalDisk.getPool();
        }
        String volPath = null;
        if (physicalDisk != null) {
            volPath = physicalDisk.getPath();
        }
        // check for disk activity; if detected, we should exit because the VM is running elsewhere
        if (_diskActivityCheckEnabled && physicalDisk != null && physicalDisk.getFormat() == PhysicalDiskFormat.QCOW2) {
            s_logger.debug("Checking physical disk file at path " + volPath + " for disk activity to ensure vm is not running elsewhere");
            try {
                HypervisorUtils.checkVolumeFileForActivity(volPath, _diskActivityCheckTimeoutSeconds, _diskActivityInactiveThresholdMilliseconds, _diskActivityCheckFileSizeMin);
            } catch (final IOException ex) {
                throw new CloudRuntimeException("Unable to check physical disk file for activity", ex);
            }
            s_logger.debug("Disk activity check cleared");
        }
        // if params contains a rootDiskController key, use its value (this is what other HVs are doing)
        DiskDef.DiskBus diskBusType = getDiskModelFromVMDetail(vmSpec);
        if (diskBusType == null) {
            diskBusType = getGuestDiskModel(vmSpec.getPlatformEmulator());
        }
        // It is unclear why previously certain DATADISKs were hard-coded to VIRTIO and others not; this
        // maintains existing functionality, with the exception that SCSI will override VIRTIO.
        DiskDef.DiskBus diskBusTypeData = (diskBusType == DiskDef.DiskBus.SCSI) ? diskBusType : DiskDef.DiskBus.VIRTIO;
        final DiskDef disk = new DiskDef();
        if (volume.getType() == Volume.Type.ISO) {
            if (volPath == null) {
                /* Add iso as placeholder */
                disk.defISODisk(null);
            } else {
                disk.defISODisk(volPath);
            }
        } else {
            final int devId = volume.getDiskSeq().intValue();
            if (diskBusType == DiskDef.DiskBus.SCSI) {
                disk.setQemuDriver(true);
                disk.setDiscard(DiscardType.UNMAP);
            }
            if (pool.getType() == StoragePoolType.RBD) {
                /*
                 * For RBD pools we use the secret mechanism in libvirt.
                 * We store the secret under the UUID of the pool; that is why
                 * we pass the pool's UUID as the authSecret.
                 */
                disk.defNetworkBasedDisk(physicalDisk.getPath().replace("rbd:", ""), pool.getSourceHost(), pool.getSourcePort(), pool.getAuthUserName(), pool.getUuid(), devId, diskBusType, DiskProtocol.RBD, DiskDef.DiskFmtType.RAW);
            } else if (pool.getType() == StoragePoolType.Gluster) {
                final String mountpoint = pool.getLocalPath();
                final String path = physicalDisk.getPath();
                final String glusterVolume = pool.getSourceDir().replace("/", "");
                disk.defNetworkBasedDisk(glusterVolume + path.replace(mountpoint, ""), pool.getSourceHost(), pool.getSourcePort(), null, null, devId, diskBusType, DiskProtocol.GLUSTER, DiskDef.DiskFmtType.QCOW2);
            } else if (pool.getType() == StoragePoolType.CLVM || physicalDisk.getFormat() == PhysicalDiskFormat.RAW) {
                disk.defBlockBasedDisk(physicalDisk.getPath(), devId, diskBusType);
            } else {
                if (volume.getType() == Volume.Type.DATADISK) {
                    disk.defFileBasedDisk(physicalDisk.getPath(), devId, diskBusTypeData, DiskDef.DiskFmtType.QCOW2);
                } else {
                    disk.defFileBasedDisk(physicalDisk.getPath(), devId, diskBusType, DiskDef.DiskFmtType.QCOW2);
                }
            }
        }
        if (data instanceof VolumeObjectTO) {
            final VolumeObjectTO volumeObjectTO = (VolumeObjectTO) data;
            disk.setSerial(diskUuidToSerial(volumeObjectTO.getUuid()));
            if (volumeObjectTO.getBytesReadRate() != null && volumeObjectTO.getBytesReadRate() > 0) {
                disk.setBytesReadRate(volumeObjectTO.getBytesReadRate());
            }
            if (volumeObjectTO.getBytesWriteRate() != null && volumeObjectTO.getBytesWriteRate() > 0) {
                disk.setBytesWriteRate(volumeObjectTO.getBytesWriteRate());
            }
            if (volumeObjectTO.getIopsReadRate() != null && volumeObjectTO.getIopsReadRate() > 0) {
                disk.setIopsReadRate(volumeObjectTO.getIopsReadRate());
            }
            if (volumeObjectTO.getIopsWriteRate() != null && volumeObjectTO.getIopsWriteRate() > 0) {
                disk.setIopsWriteRate(volumeObjectTO.getIopsWriteRate());
            }
            if (volumeObjectTO.getCacheMode() != null) {
                disk.setCacheMode(DiskDef.DiskCacheMode.valueOf(volumeObjectTO.getCacheMode().toString().toUpperCase()));
            }
        }
        vm.getDevices().addDevice(disk);
    }
    if (vmSpec.getType() != VirtualMachine.Type.User) {
        if (_sysvmISOPath != null) {
            final DiskDef iso = new DiskDef();
            iso.defISODisk(_sysvmISOPath);
            vm.getDevices().addDevice(iso);
        }
    }
    // For LXC, find and add the root filesystem and RBD data disks
    if (HypervisorType.LXC.toString().toLowerCase().equals(vm.getHvsType())) {
        for (final DiskTO volume : disks) {
            final DataTO data = volume.getData();
            final PrimaryDataStoreTO store = (PrimaryDataStoreTO) data.getDataStore();
            if (volume.getType() == Volume.Type.ROOT) {
                final KVMPhysicalDisk physicalDisk = _storagePoolMgr.getPhysicalDisk(store.getPoolType(), store.getUuid(), data.getPath());
                final FilesystemDef rootFs = new FilesystemDef(physicalDisk.getPath(), "/");
                vm.getDevices().addDevice(rootFs);
            } else if (volume.getType() == Volume.Type.DATADISK) {
                final KVMPhysicalDisk physicalDisk = _storagePoolMgr.getPhysicalDisk(store.getPoolType(), store.getUuid(), data.getPath());
                final KVMStoragePool pool = physicalDisk.getPool();
                if (StoragePoolType.RBD.equals(pool.getType())) {
                    final int devId = volume.getDiskSeq().intValue();
                    final String device = mapRbdDevice(physicalDisk);
                    if (device != null) {
                        s_logger.debug("RBD device on host is: " + device);
                        final DiskDef diskdef = new DiskDef();
                        diskdef.defBlockBasedDisk(device, devId, DiskDef.DiskBus.VIRTIO);
                        diskdef.setQemuDriver(false);
                        vm.getDevices().addDevice(diskdef);
                    } else {
                        throw new InternalErrorException("Error while mapping RBD device on host");
                    }
                }
            }
        }
    }
}
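One small wrinkle in createVbd is the anonymous comparator at the top: it returns 1 or -1 but never 0, so it does not respect the Comparator contract for equal disk sequence numbers (harmless in practice, since sequence numbers are expected to be unique per VM). Assuming getDiskSeq() returns a boxed numeric value, as its boxed use above suggests, the same ordering can be expressed with a key extractor on Java 8+; a sketch, illustrative only:

    // Contract-respecting equivalent of the sort at the top of createVbd (Java 8+).
    final List<DiskTO> disks = Arrays.asList(vmSpec.getDisks());
    disks.sort(Comparator.comparing(DiskTO::getDiskSeq));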
Use of org.apache.cloudstack.storage.to.VolumeObjectTO in project cloudstack by apache.
The class VmwareStorageManagerImpl, method execute:
@Override
public CreateVMSnapshotAnswer execute(VmwareHostService hostService, CreateVMSnapshotCommand cmd) {
    List<VolumeObjectTO> volumeTOs = cmd.getVolumeTOs();
    String vmName = cmd.getVmName();
    String vmSnapshotName = cmd.getTarget().getSnapshotName();
    String vmSnapshotDesc = cmd.getTarget().getDescription();
    boolean snapshotMemory = cmd.getTarget().getType() == VMSnapshot.Type.DiskAndMemory;
    boolean quiescevm = cmd.getTarget().getQuiescevm();
    VirtualMachineMO vmMo = null;
    VmwareContext context = hostService.getServiceContext(cmd);
    try {
        VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd);
        // wait if there is already a VM snapshot task running
        ManagedObjectReference taskmgr = context.getServiceContent().getTaskManager();
        List<ManagedObjectReference> tasks = context.getVimClient().getDynamicProperty(taskmgr, "recentTask");
        for (ManagedObjectReference taskMor : tasks) {
            TaskInfo info = (TaskInfo) (context.getVimClient().getDynamicProperty(taskMor, "info"));
            if (info.getEntityName().equals(cmd.getVmName()) && StringUtils.isNotBlank(info.getName()) && info.getName().equalsIgnoreCase("CreateSnapshot_Task")) {
                if (!(info.getState().equals(TaskInfoState.SUCCESS) || info.getState().equals(TaskInfoState.ERROR))) {
                    s_logger.debug("There is already a VM snapshot task running, wait for it");
                    context.getVimClient().waitForTask(taskMor);
                }
            }
        }
        vmMo = hyperHost.findVmOnHyperHost(vmName);
        if (vmMo == null) {
            vmMo = hyperHost.findVmOnPeerHyperHost(vmName);
        }
        if (vmMo == null) {
            String msg = "Unable to find VM for CreateVMSnapshotCommand";
            s_logger.info(msg);
            return new CreateVMSnapshotAnswer(cmd, false, msg);
        } else {
            if (vmMo.getSnapshotMor(vmSnapshotName) != null) {
                s_logger.info("VM snapshot " + vmSnapshotName + " already exists");
            } else if (!vmMo.createSnapshot(vmSnapshotName, vmSnapshotDesc, snapshotMemory, quiescevm)) {
                return new CreateVMSnapshotAnswer(cmd, false, "Unable to create snapshot due to an ESXi internal failure");
            }
            Map<String, String> mapNewDisk = getNewDiskMap(vmMo);
            setVolumeToPathAndSize(volumeTOs, mapNewDisk, context, hyperHost, cmd.getVmName());
            return new CreateVMSnapshotAnswer(cmd, cmd.getTarget(), volumeTOs);
        }
    } catch (Exception e) {
        String msg = e.getMessage();
        s_logger.error("Failed to create snapshot for vm: " + vmName + " due to " + msg);
        try {
            if (vmMo.getSnapshotMor(vmSnapshotName) != null) {
                vmMo.removeSnapshot(vmSnapshotName, false);
            }
        } catch (Exception e1) {
            s_logger.info("[ignored]" + "error during snapshot remove: " + e1.getLocalizedMessage());
        }
        return new CreateVMSnapshotAnswer(cmd, false, e.getMessage());
    }
}
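On success, setVolumeToPathAndSize updates the paths and sizes of the VolumeObjectTO list in place, and the same list is handed back inside the answer. A caller-side sketch, assuming the answer exposes that list through a getVolumeTOs() accessor (an assumption here, not shown above) and a storageManager reference to this class:

    // Illustrative only: read back the updated volume transfer objects after the VM snapshot.
    CreateVMSnapshotAnswer answer = storageManager.execute(hostService, cmd);
    if (answer.getResult()) {
        for (VolumeObjectTO volumeTO : answer.getVolumeTOs()) {
            s_logger.debug("Volume " + volumeTO.getUuid() + " is now at path " + volumeTO.getPath());
        }
    }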