Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo in project cloudstack by apache.
From the class VolumeServiceImpl, the method managedCopyBaseImageCallback:
protected Void managedCopyBaseImageCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, ManagedCreateBaseImageContext<VolumeApiResult> context) {
    CopyCommandResult result = callback.getResult();
    VolumeInfo volumeInfo = context.getVolumeInfo();
    VolumeApiResult res = new VolumeApiResult(volumeInfo);
    if (result.isSuccess()) {
        // volumeInfo.processEvent(Event.OperationSuccessed, result.getAnswer());
        VolumeVO volume = volDao.findById(volumeInfo.getId());
        CopyCmdAnswer answer = (CopyCmdAnswer) result.getAnswer();
        TemplateObjectTO templateObjectTo = (TemplateObjectTO) answer.getNewData();
        volume.setPath(templateObjectTo.getPath());
        if (templateObjectTo.getFormat() != null) {
            volume.setFormat(templateObjectTo.getFormat());
        }
        volDao.update(volume.getId(), volume);
    } else {
        volumeInfo.processEvent(Event.DestroyRequested);
        res.setResult(result.getResult());
    }
    AsyncCallFuture<VolumeApiResult> future = context.getFuture();
    future.complete(res);
    return null;
}
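All of the callbacks in this class follow the same bridge pattern: the dispatcher hands the asynchronous CopyCommandResult to the callback, the callback persists whatever changed, and then it completes the AsyncCallFuture the original caller is blocked on. Below is a minimal, self-contained analogue of that pattern using java.util.concurrent.CompletableFuture; none of the CloudStack classes are used, so every name in it is an illustrative stand-in, not project API.

import java.util.concurrent.CompletableFuture;

public class CallbackBridgeSketch {
    // Stand-ins for CopyCommandResult and VolumeApiResult; purely illustrative.
    record CopyResult(boolean success, String path, String error) {}
    record VolumeResult(String volumePath, String error) {}

    // The "callback": consumes the async result, records the outcome, and completes the caller's future.
    static void copyCallback(CopyResult result, CompletableFuture<VolumeResult> future) {
        if (result.success()) {
            // In VolumeServiceImpl this is where the VolumeVO path/format is updated via volDao.
            future.complete(new VolumeResult(result.path(), null));
        } else {
            // Failure path: record the error so the waiting caller can act on it.
            future.complete(new VolumeResult(null, result.error()));
        }
    }

    public static void main(String[] args) throws Exception {
        CompletableFuture<VolumeResult> future = new CompletableFuture<>();
        // Simulate the storage subsystem finishing the copy on another thread.
        new Thread(() -> copyCallback(new CopyResult(true, "/primary/vol-1", null), future)).start();
        // The synchronous caller blocks here, much like AsyncCallFuture.get() in the snippets.
        System.out.println("volume path: " + future.get().volumePath());
    }
}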
Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo in project cloudstack by apache.
From the class VolumeServiceImpl, the method createVolumeFromSnapshotCallback:
protected Void createVolumeFromSnapshotCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, CreateVolumeFromBaseImageContext<VolumeApiResult> context) {
    CopyCommandResult result = callback.getResult();
    VolumeInfo volume = (VolumeInfo) context.templateOnStore;
    SnapshotInfo snapshot = context.snapshot;
    VolumeApiResult apiResult = new VolumeApiResult(volume);
    Event event = null;
    if (result.isFailed()) {
        apiResult.setResult(result.getResult());
        event = Event.OperationFailed;
    } else {
        event = Event.OperationSuccessed;
    }
    try {
        if (result.isSuccess()) {
            volume.processEvent(event, result.getAnswer());
        } else {
            volume.processEvent(event);
        }
        _volumeDetailsDao.removeDetail(volume.getId(), SNAPSHOT_ID);
    } catch (Exception e) {
        s_logger.debug("create volume from snapshot failed", e);
        apiResult.setResult(e.toString());
    }
    AsyncCallFuture<VolumeApiResult> future = context.future;
    future.complete(apiResult);
    return null;
}
Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo in project cloudstack by apache.
From the class VolumeServiceImpl, the method copyVolumeCallBack:
protected Void copyVolumeCallBack(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, CopyVolumeContext<VolumeApiResult> context) {
    VolumeInfo srcVolume = context.srcVolume;
    VolumeInfo destVolume = context.destVolume;
    CopyCommandResult result = callback.getResult();
    AsyncCallFuture<VolumeApiResult> future = context.future;
    VolumeApiResult res = new VolumeApiResult(destVolume);
    try {
        if (result.isFailed()) {
            res.setResult(result.getResult());
            destVolume.processEvent(Event.MigrationCopyFailed);
            srcVolume.processEvent(Event.OperationFailed);
            destroyVolume(destVolume.getId());
            destVolume = volFactory.getVolume(destVolume.getId());
            AsyncCallFuture<VolumeApiResult> destroyFuture = expungeVolumeAsync(destVolume);
            destroyFuture.get();
            future.complete(res);
            return null;
        }
        srcVolume.processEvent(Event.OperationSuccessed);
        destVolume.processEvent(Event.MigrationCopySucceeded, result.getAnswer());
        volDao.updateUuid(srcVolume.getId(), destVolume.getId());
        _volumeStoreDao.updateVolumeId(srcVolume.getId(), destVolume.getId());
        try {
            destroyVolume(srcVolume.getId());
            srcVolume = volFactory.getVolume(srcVolume.getId());
            AsyncCallFuture<VolumeApiResult> destroyFuture = expungeVolumeAsync(srcVolume);
            // If the volume destroy fails, the VDI may still be in an in-use state, so wait briefly and retry once.
            if (destroyFuture.get().isFailed()) {
                Thread.sleep(5 * 1000);
                destroyFuture = expungeVolumeAsync(srcVolume);
                destroyFuture.get();
            }
            future.complete(res);
        } catch (Exception e) {
            s_logger.debug("failed to clean up volume on storage", e);
        }
        return null;
    } catch (Exception e) {
        s_logger.debug("Failed to process copy volume callback", e);
        res.setResult(e.toString());
        future.complete(res);
    }
    return null;
}
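The cleanup path above retries the expunge once after a short sleep because the source VDI may still be marked in use right after the copy finishes. The same retry shape, reduced to a self-contained sketch (the five-second delay mirrors the snippet; nothing here is CloudStack API):

import java.util.concurrent.Callable;

public class RetryOnceSketch {
    // Run the task; if it reports failure, wait briefly and try exactly one more time,
    // mirroring the expungeVolumeAsync handling in copyVolumeCallBack.
    static boolean runWithOneRetry(Callable<Boolean> task, long delayMillis) throws Exception {
        if (task.call()) {
            return true;
        }
        Thread.sleep(delayMillis);
        return task.call();
    }

    public static void main(String[] args) throws Exception {
        // Hypothetical task that fails the first time (e.g. the VDI is still in use) and then succeeds.
        final int[] attempts = {0};
        boolean ok = runWithOneRetry(() -> ++attempts[0] > 1, 5 * 1000);
        System.out.println("expunge succeeded: " + ok + " after " + attempts[0] + " attempt(s)");
    }
}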
Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo in project cloudstack by apache.
From the class VolumeServiceImpl, the method destroyVolume:
@Override
@DB
public boolean destroyVolume(long volumeId) throws ConcurrentOperationException {
    // Mark the volume entry in the volumes table as destroyed.
    VolumeInfo vol = volFactory.getVolume(volumeId);
    vol.stateTransit(Volume.Event.DestroyRequested);
    snapshotMgr.deletePoliciesForVolume(volumeId);
    vol.stateTransit(Volume.Event.OperationSucceeded);
    return true;
}
Use of org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo in project cloudstack by apache.
From the class VmwareStorageMotionStrategy, the method migrateVmWithVolumesAcrossCluster:
private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachineTO to, Host srcHost, Host destHost, Map<VolumeInfo, DataStore> volumeToPool) throws AgentUnavailableException {
    // Initiate migration of a virtual machine together with its volumes.
    try {
        List<Pair<VolumeTO, StorageFilerTO>> volumeToFilerto = new ArrayList<Pair<VolumeTO, StorageFilerTO>>();
        for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
            VolumeInfo volume = entry.getKey();
            VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId()));
            StorageFilerTO filerTo = new StorageFilerTO((StoragePool) entry.getValue());
            volumeToFilerto.add(new Pair<VolumeTO, StorageFilerTO>(volumeTo, filerTo));
        }
        // Migration across clusters is done in phases:
        // 1. Send a migrate command to the source resource to initiate the migration
        //    (validations are run against the target).
        // 2. Complete the process and update the volume details.
        MigrateWithStorageCommand migrateWithStorageCmd = new MigrateWithStorageCommand(to, volumeToFilerto, destHost.getGuid());
        MigrateWithStorageAnswer migrateWithStorageAnswer = (MigrateWithStorageAnswer) agentMgr.send(srcHost.getId(), migrateWithStorageCmd);
        if (migrateWithStorageAnswer == null) {
            s_logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed.");
            throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
        } else if (!migrateWithStorageAnswer.getResult()) {
            s_logger.error("Migration with storage of vm " + vm + " failed. Details: " + migrateWithStorageAnswer.getDetails());
            throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost + ". " + migrateWithStorageAnswer.getDetails());
        } else {
            // Update the volume details after migration.
            updateVolumesAfterMigration(volumeToPool, migrateWithStorageAnswer.getVolumeTos());
        }
        s_logger.debug("Storage migration of VM " + vm.getInstanceName() + " completed successfully. Migrated to host " + destHost.getName());
        return migrateWithStorageAnswer;
    } catch (OperationTimedoutException e) {
        s_logger.error("Error while migrating vm " + vm + " to host " + destHost, e);
        throw new AgentUnavailableException("Operation timed out on storage motion for " + vm, destHost.getId());
    }
}
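The volumeToPool argument maps each of the VM's volumes to the primary store it should land on. One way a caller might assemble it, shown as a sketch only: volDao, volFactory and dataStoreMgr are assumed to be injected members of the strategy, the findByInstance/getDataStore calls reflect the broader CloudStack DAO API rather than anything in this snippet, and the target-pool selection is left as a plain parameter.

// Sketch only: build the Map<VolumeInfo, DataStore> that migrateVmWithVolumesAcrossCluster expects.
private Map<VolumeInfo, DataStore> buildVolumeToPoolMap(VMInstanceVO vm, long targetPoolId) {
    Map<VolumeInfo, DataStore> volumeToPool = new HashMap<>();
    for (VolumeVO volumeVO : volDao.findByInstance(vm.getId())) {
        VolumeInfo volumeInfo = volFactory.getVolume(volumeVO.getId());
        // Target pool chosen by the caller's allocation logic; a single pool id is used here for illustration.
        DataStore targetStore = dataStoreMgr.getDataStore(targetPoolId, DataStoreRole.Primary);
        volumeToPool.put(volumeInfo, targetStore);
    }
    return volumeToPool;
}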