use of org.apache.cloudstack.engine.subsystem.api.storage.EndPoint in project cloudstack by apache.
the class StorageSystemDataMotionStrategy method handleCopyDataToSecondaryStorage.
/**
 * This method is responsible for copying a volume from the managed store to a secondary store. It is used in two cases:
 * 1) when creating a template from a snapshot;
 * 2) when createSnapshot is called with location=SECONDARY.
 *
 * @param snapshotInfo the source snapshot
 * @param destData the destination (can be a template or a snapshot)
 * @param callback the callback for the asynchronous operation
 */
private void handleCopyDataToSecondaryStorage(SnapshotInfo snapshotInfo, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback) {
    try {
        snapshotInfo.processEvent(Event.CopyingRequested);
    } catch (Exception ex) {
        throw new CloudRuntimeException("This snapshot is not currently in a state where it can be used to create a template.");
    }
    HostVO hostVO = getHost(snapshotInfo);
    boolean usingBackendSnapshot = usingBackendSnapshotFor(snapshotInfo);
    boolean computeClusterSupportsResign = clusterDao.getSupportsResigning(hostVO.getClusterId());
    boolean needCache = needCacheStorage(snapshotInfo, destData);
    DataObject destOnStore = destData;
    if (needCache) {
        // creates an object in the DB for data to be cached
        Scope selectedScope = pickCacheScopeForCopy(snapshotInfo, destData);
        destOnStore = cacheMgr.getCacheObject(snapshotInfo, selectedScope);
        destOnStore.processEvent(Event.CreateOnlyRequested);
    }
    if (usingBackendSnapshot && !computeClusterSupportsResign) {
        String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation: Cluster ID = " + hostVO.getClusterId();
        LOGGER.warn(noSupportForResignErrMsg);
        throw new CloudRuntimeException(noSupportForResignErrMsg);
    }
    try {
        if (usingBackendSnapshot) {
            createVolumeFromSnapshot(hostVO, snapshotInfo, true);
        }
        DataStore srcDataStore = snapshotInfo.getDataStore();
        String value = _configDao.getValue(Config.PrimaryStorageDownloadWait.toString());
        int primaryStorageDownloadWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue()));
        CopyCommand copyCommand = new CopyCommand(snapshotInfo.getTO(), destOnStore.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
        String errMsg = null;
        CopyCmdAnswer copyCmdAnswer = null;
        try {
            // When a back-end snapshot is in use, grantAccess is not needed here
            // (because we passed in true as the third parameter to createVolumeFromSnapshot above).
            if (!usingBackendSnapshot) {
                _volumeService.grantAccess(snapshotInfo, hostVO, srcDataStore);
            }
            Map<String, String> srcDetails = getSnapshotDetails(snapshotInfo);
            copyCommand.setOptions(srcDetails);
            copyCmdAnswer = (CopyCmdAnswer) _agentMgr.send(hostVO.getId(), copyCommand);
            if (!copyCmdAnswer.getResult()) {
                // We were not able to copy. Handle it.
                errMsg = copyCmdAnswer.getDetails();
                throw new CloudRuntimeException(errMsg);
            }
            if (needCache) {
                // If cache storage was needed (i.e. an object store is used as secondary storage),
                // the hypervisor has at this point copied the data from primary storage to the NFS cache.
                // We now issue another copy command to move that data from the cache to secondary storage,
                // and then clean up the cache.
                destOnStore.processEvent(Event.OperationSuccessed, copyCmdAnswer);
                CopyCommand cmd = new CopyCommand(destOnStore.getTO(), destData.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
                EndPoint ep = selector.select(destOnStore, destData);
                if (ep == null) {
                    errMsg = "No remote endpoint to send command to; check if host or SSVM is down";
                    LOGGER.error(errMsg);
                    copyCmdAnswer = new CopyCmdAnswer(errMsg);
                } else {
                    copyCmdAnswer = (CopyCmdAnswer) ep.sendMessage(cmd);
                }
                // clean up the snapshot copied to the staging store
                cacheMgr.deleteCacheObject(destOnStore);
            }
        } catch (CloudRuntimeException | AgentUnavailableException | OperationTimedoutException ex) {
            String msg = "Failed to create template from snapshot (Snapshot ID = " + snapshotInfo.getId() + "): ";
            LOGGER.warn(msg, ex);
            throw new CloudRuntimeException(msg + ex.getMessage());
        } finally {
            _volumeService.revokeAccess(snapshotInfo, hostVO, srcDataStore);
            if (copyCmdAnswer == null || !copyCmdAnswer.getResult()) {
                if (copyCmdAnswer != null && !StringUtils.isEmpty(copyCmdAnswer.getDetails())) {
                    errMsg = copyCmdAnswer.getDetails();
                    if (needCache) {
                        cacheMgr.deleteCacheObject(destOnStore);
                    }
                } else {
                    errMsg = "Unable to create template from snapshot";
                }
            }
            try {
                if (StringUtils.isEmpty(errMsg)) {
                    snapshotInfo.processEvent(Event.OperationSuccessed);
                } else {
                    snapshotInfo.processEvent(Event.OperationFailed);
                }
            } catch (Exception ex) {
                LOGGER.warn("Error processing snapshot event: " + ex.getMessage(), ex);
            }
        }
        CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
        result.setResult(errMsg);
        callback.complete(result);
    } finally {
        if (usingBackendSnapshot) {
            deleteVolumeFromSnapshot(snapshotInfo);
        }
    }
}
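The cache-to-secondary hop above illustrates the EndPoint pattern that recurs throughout this page: ask the selector for an endpoint, fall back to a failed Answer when none is available, otherwise send the command synchronously. A minimal sketch of that pattern, pulled out into a standalone helper, might look as follows (the helper name sendViaEndpoint is hypothetical; EndPoint, EndPointSelector, Command and Answer are the CloudStack types used above):

// Illustrative helper only; this method does not exist in CloudStack.
private Answer sendViaEndpoint(EndPointSelector selector, DataObject srcData, DataObject destData, Command cmd) {
    EndPoint ep = selector.select(srcData, destData);
    if (ep == null) {
        // Mirror the fallback used above: log and return a failed Answer instead of throwing.
        String errMsg = "No remote endpoint to send command to; check if host or SSVM is down";
        LOGGER.error(errMsg);
        return new Answer(cmd, false, errMsg);
    }
    return ep.sendMessage(cmd);
}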
use of org.apache.cloudstack.engine.subsystem.api.storage.EndPoint in project cloudstack by apache.
the class StorageSystemDataMotionStrategy method handleVolumeMigrationForXenServer.
private void handleVolumeMigrationForXenServer(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) {
    VirtualMachine vm = srcVolumeInfo.getAttachedVM();
    if (vm == null || vm.getState() != VirtualMachine.State.Running) {
        throw new CloudRuntimeException("Currently, a volume to migrate from non-managed storage to managed storage on XenServer must be attached to a VM in the Running state.");
    }
    destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null);
    destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore());
    handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
    HostVO hostVO = _hostDao.findById(vm.getHostId());
    _volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore());
    String value = _configDao.getValue(Config.MigrateWait.key());
    int waitInterval = NumbersUtil.parseInt(value, Integer.parseInt(Config.MigrateWait.getDefaultValue()));
    StoragePool destPool = (StoragePool) dataStoreMgr.getDataStore(destVolumeInfo.getDataStore().getId(), DataStoreRole.Primary);
    MigrateVolumeCommand command = new MigrateVolumeCommand(srcVolumeInfo.getId(), srcVolumeInfo.getPath(), destPool, srcVolumeInfo.getAttachedVmName(), srcVolumeInfo.getVolumeType(), waitInterval, null);
    Map<String, String> details = new HashMap<>();
    details.put(DiskTO.MANAGED, Boolean.TRUE.toString());
    details.put(DiskTO.IQN, destVolumeInfo.get_iScsiName());
    details.put(DiskTO.STORAGE_HOST, destPool.getHostAddress());
    details.put(DiskTO.PROTOCOL_TYPE, (destPool.getPoolType() != null) ? destPool.getPoolType().toString() : null);
    command.setDestDetails(details);
    EndPoint ep = selector.select(srcVolumeInfo, StorageAction.MIGRATEVOLUME);
    Answer answer;
    if (ep == null) {
        String errMsg = "No remote endpoint to send command to; check if host or SSVM is down";
        LOGGER.error(errMsg);
        answer = new Answer(command, false, errMsg);
    } else {
        answer = ep.sendMessage(command);
    }
    handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION);
    if (answer == null || !answer.getResult()) {
        handleFailedVolumeMigration(srcVolumeInfo, destVolumeInfo, hostVO);
        throw new CloudRuntimeException("Failed to migrate volume with ID " + srcVolumeInfo.getId() + " to storage pool with ID " + destPool.getId());
    } else {
        handleSuccessfulVolumeMigration(srcVolumeInfo, destPool, (MigrateVolumeAnswer) answer);
    }
}
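One detail worth noting in the method above: the MIGRATION/NO_MIGRATION quality-of-service bracket is not protected by a try/finally, so a runtime exception escaping from sendMessage would skip the NO_MIGRATION reset. A more defensive variant of the same bracket could look like the sketch below; this is an editorial suggestion reusing the variables from the method above, not the code as it exists in CloudStack:

// Sketch of a try/finally variant of the QoS bracket; srcVolumeInfo, destVolumeInfo,
// command and selector are assumed to be set up exactly as in the method above.
handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
Answer answer;
try {
    EndPoint ep = selector.select(srcVolumeInfo, StorageAction.MIGRATEVOLUME);
    answer = (ep != null) ? ep.sendMessage(command) : new Answer(command, false, "No remote endpoint to send command to; check if host or SSVM is down");
} finally {
    // Always restore the non-migration QoS state, even if sending the command throws.
    handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION);
}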
use of org.apache.cloudstack.engine.subsystem.api.storage.EndPoint in project cloudstack by apache.
the class EndpointSelectorTest method testMixZonePrimaryStorages.
@Test
public void testMixZonePrimaryStorages() {
    Long srcStoreId = null;
    Long destStoreId = imageStore.getId();
    DataStore store = createPrimaryDataStore(ScopeType.ZONE);
    srcStoreId = store.getId();
    HostVO host = createHost(Hypervisor.HypervisorType.VMware);
    addStorageToHost(store, host);
    store = createPrimaryDataStore(ScopeType.ZONE);
    host = createHost(Hypervisor.HypervisorType.VMware);
    addStorageToHost(store, host);
    Long xenStoreId = null;
    store = createPrimaryDataStore(ScopeType.CLUSTER);
    xenStoreId = store.getId();
    host = createHost(Hypervisor.HypervisorType.XenServer);
    addStorageToHost(store, host);
    store = createPrimaryDataStore(ScopeType.CLUSTER);
    host = createHost(Hypervisor.HypervisorType.XenServer);
    addStorageToHost(store, host);
    ZoneScope srcScope = new ZoneScope(dcId);
    DataStore srcStore = mock(DataStore.class);
    DataStore destStore = mock(DataStore.class);
    when(srcStore.getScope()).thenReturn(srcScope);
    when(srcStore.getRole()).thenReturn(DataStoreRole.Primary);
    when(srcStore.getId()).thenReturn(srcStoreId);
    when(destStore.getScope()).thenReturn(srcScope);
    when(destStore.getRole()).thenReturn(DataStoreRole.Image);
    when(destStore.getId()).thenReturn(destStoreId);
    DataObject srcObj = mock(DataObject.class);
    DataObject destObj = mock(DataObject.class);
    when(srcObj.getDataStore()).thenReturn(srcStore);
    when(destObj.getDataStore()).thenReturn(destStore);
    EndPoint ep = endPointSelector.select(srcObj, destObj);
    Assert.assertTrue(ep != null);
    Long hostId = ep.getId();
    HostVO newHost = hostDao.findById(hostId);
    Assert.assertTrue(newHost.getHypervisorType() == Hypervisor.HypervisorType.VMware);
    when(srcStore.getRole()).thenReturn(DataStoreRole.Image);
    when(srcStore.getId()).thenReturn(destStoreId);
    when(destStore.getId()).thenReturn(srcStoreId);
    when(destStore.getRole()).thenReturn(DataStoreRole.Primary);
    ep = endPointSelector.select(srcObj, destObj);
    Assert.assertTrue(ep != null);
    hostId = ep.getId();
    newHost = hostDao.findById(hostId);
    Assert.assertTrue(newHost.getHypervisorType() == Hypervisor.HypervisorType.VMware);
    ClusterScope clusterScope = new ClusterScope(clusterId, podId, dcId);
    when(srcStore.getRole()).thenReturn(DataStoreRole.Primary);
    when(srcStore.getScope()).thenReturn(clusterScope);
    when(srcStore.getId()).thenReturn(xenStoreId);
    ep = endPointSelector.select(srcStore);
    Assert.assertTrue(ep != null);
    newHost = hostDao.findById(ep.getId());
    Assert.assertTrue(newHost.getHypervisorType() == Hypervisor.HypervisorType.XenServer);
}
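The test above checks that, for copies between a zone-wide primary store and the image store, the selector returns an endpoint backed by a VMware host in both directions, while a selection against the cluster-scoped store resolves to a XenServer host. If the assertions were ever reworked, the more specific JUnit matchers would express the same checks slightly more directly (an editorial alternative, not the test as written):

// Same checks as above using assertNotNull/assertEquals instead of assertTrue.
EndPoint ep = endPointSelector.select(srcObj, destObj);
Assert.assertNotNull(ep);
HostVO newHost = hostDao.findById(ep.getId());
Assert.assertEquals(Hypervisor.HypervisorType.VMware, newHost.getHypervisorType());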
use of org.apache.cloudstack.engine.subsystem.api.storage.EndPoint in project cloudstack by apache.
the class BaseImageStoreDriverImpl method copyAsync.
@Override
public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback) {
    if (!canCopy(srcdata, destData)) {
        return;
    }
    if ((srcdata.getType() == DataObjectType.TEMPLATE && destData.getType() == DataObjectType.TEMPLATE) ||
            (srcdata.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.SNAPSHOT) ||
            (srcdata.getType() == DataObjectType.VOLUME && destData.getType() == DataObjectType.VOLUME)) {
        int nMaxExecutionMinutes = NumbersUtil.parseInt(configDao.getValue(Config.SecStorageCmdExecutionTimeMax.key()), 30);
        CopyCommand cmd = new CopyCommand(srcdata.getTO(), destData.getTO(), nMaxExecutionMinutes * 60 * 1000, VirtualMachineManager.ExecuteInSequence.value());
        Answer answer = null;
        // Select a host endpoint such that the load is balanced out
        List<EndPoint> eps = _epSelector.findAllEndpointsForScope(srcdata.getDataStore());
        if (eps == null || eps.isEmpty()) {
            String errMsg = "No remote endpoint to send command to; check if host or SSVM is down";
            LOGGER.error(errMsg);
            answer = new Answer(cmd, false, errMsg);
        } else {
            // Select the endpoint with the least number of commands running on it
            answer = sendToLeastBusyEndpoint(eps, cmd);
        }
        CopyCommandResult result = new CopyCommandResult("", answer);
        callback.complete(result);
    }
}
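sendToLeastBusyEndpoint is a member of BaseImageStoreDriverImpl whose body is not shown here; per the comment above, it picks the endpoint with the fewest commands currently running on it. A simplified, hypothetical illustration of that idea is sketched below; the activeCommandCounts map and the method name are assumptions made for the sketch, not the project's actual bookkeeping:

// Hypothetical least-busy selection over the endpoints returned by findAllEndpointsForScope.
private Answer sendToLeastBusyEndpointSketch(List<EndPoint> eps, Command cmd, Map<Long, Integer> activeCommandCounts) {
    EndPoint leastBusy = eps.get(0);
    for (EndPoint ep : eps) {
        int current = activeCommandCounts.getOrDefault(ep.getId(), 0);
        int best = activeCommandCounts.getOrDefault(leastBusy.getId(), 0);
        if (current < best) {
            leastBusy = ep;
        }
    }
    return leastBusy.sendMessage(cmd);
}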
use of org.apache.cloudstack.engine.subsystem.api.storage.EndPoint in project cloudstack by apache.
the class BaseImageStoreDriverImpl method deleteAsync.
@Override
public void deleteAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback<CommandResult> callback) {
    CommandResult result = new CommandResult();
    try {
        DeleteCommand cmd = new DeleteCommand(data.getTO());
        EndPoint ep = _epSelector.select(data);
        Answer answer = null;
        if (ep == null) {
            String errMsg = "No remote endpoint to send command to; check if host or SSVM is down";
            LOGGER.error(errMsg);
            answer = new Answer(cmd, false, errMsg);
        } else {
            answer = ep.sendMessage(cmd);
        }
        if (answer != null && !answer.getResult()) {
            result.setResult(answer.getDetails());
        }
    } catch (Exception ex) {
        LOGGER.debug("Unable to destroy " + data.getType().toString() + ": " + data.getId(), ex);
        result.setResult(ex.toString());
    }
    callback.complete(result);
}