Use of com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController in the project coprhd-controller by CoprHD.
From the class VPlexBlockServiceApiImpl, method orchestrateVPoolChanges.
/**
 * Invokes the block orchestrator for a vpool change operation.
 *
 * @param volumes The volumes undergoing the vpool change.
 * @param descriptors The prepared volume descriptors.
 * @param taskId The task identifier.
 */
private void orchestrateVPoolChanges(List<Volume> volumes, List<VolumeDescriptor> descriptors, String taskId) {
    try {
        BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
        controller.changeVirtualPool(descriptors, taskId);
    } catch (InternalException e) {
        // No isErrorEnabled() guard needed for a simple error(...) call; log
        // unconditionally, consistent with the other orchestration error
        // handlers in this class (e.g. changeVirtualArrayForVolumes).
        s_logger.error("Controller error", e);
        // Mark the task as failed on every volume so callers observe the error.
        String errMsg = String.format("Controller error on changeVolumeVirtualPool: %s", e.getMessage());
        Operation statusUpdate = new Operation(Operation.Status.error.name(), errMsg);
        for (Volume volume : volumes) {
            _dbClient.updateTaskOpStatus(Volume.class, volume.getId(), taskId, statusUpdate);
        }
        // Re-throw so the caller can translate the failure for the API response.
        throw e;
    }
}
Use of com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController in the project coprhd-controller by CoprHD.
From the class VPlexBlockServiceApiImpl, method changeVirtualArrayForVolumes.
/**
 * {@inheritDoc}
 */
@Override
public void changeVirtualArrayForVolumes(List<Volume> volumes, BlockConsistencyGroup cg, List<Volume> cgVolumes, VirtualArray newVirtualArray, String taskId) throws InternalException {
    // Validate each volume first: active snapshots or mirrors block the change.
    for (Volume volume : volumes) {
        // Any active snapshot blocks the varray change; if the snapshots are
        // removed, the change can be performed.
        for (BlockSnapshot snapshot : getSnapshots(volume)) {
            if (!snapshot.getInactive()) {
                throw APIException.badRequests.volumeForVarrayChangeHasSnaps(volume.getId().toString());
            }
        }
        // Mirrors also block the varray change; the user must explicitly delete
        // mirrors first. This applies to both Local and Distributed volumes —
        // for a distributed volume getMirrors() reports mirrors on either the
        // source or the HA side.
        StringSet mirrorIds = volume.getMirrors();
        if ((mirrorIds != null) && (!mirrorIds.isEmpty())) {
            List<VplexMirror> vplexMirrors = _dbClient.queryObject(VplexMirror.class, StringSetUtil.stringSetToUriList(mirrorIds));
            if ((vplexMirrors != null) && (!vplexMirrors.isEmpty())) {
                throw APIException.badRequests.volumeForVarrayChangeHasMirrors(volume.getId().toString(), volume.getLabel());
            }
        }
    }
    // Enforce the maximum number of CG volumes allowed for migration.
    if ((cg != null) && (volumes.size() > _maxCgVolumesForMigration)) {
        throw APIException.badRequests.cgContainsTooManyVolumesForVArrayChange(cg.getLabel(), volumes.size(), _maxCgVolumesForMigration);
    }
    // For a local consistency group holding multiple volumes, verify the
    // target systems support CG data migration.
    if ((cg != null) && cg.checkForType(Types.LOCAL) && (cgVolumes.size() > 1)) {
        verifyTargetSystemsForCGDataMigration(volumes, null, newVirtualArray.getId());
    }
    // Build the volume descriptors for the virtual array change and hand them
    // off to the block orchestrator.
    List<VolumeDescriptor> descriptors = createVolumeDescriptorsForVarrayChange(volumes, newVirtualArray, taskId);
    try {
        BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
        controller.changeVirtualArray(descriptors, taskId);
        s_logger.info("Successfully invoked block orchestrator.");
    } catch (InternalException e) {
        s_logger.error("Controller error", e);
        // Fail the task on the migration targets and their migrations.
        for (VolumeDescriptor descriptor : descriptors) {
            if (VolumeDescriptor.Type.VPLEX_MIGRATE_VOLUME.equals(descriptor.getType())) {
                _dbClient.error(Volume.class, descriptor.getVolumeURI(), taskId, e);
                _dbClient.error(Migration.class, descriptor.getMigrationId(), taskId, e);
            }
        }
        throw e;
    }
}
Use of com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController in the project coprhd-controller by CoprHD.
From the class VMAX3BlockSnapshotSessionApiImpl, method createSnapshotSession.
/**
 * {@inheritDoc}
 */
@Override
public void createSnapshotSession(BlockObject sourceObj, URI snapSessionURI, List<List<URI>> snapSessionSnapshotURIs, String copyMode, String taskId) {
    // Hand off to the block orchestrator, which creates the array snapshot
    // session and creates and links target volumes as necessary.
    StorageSystem system = _dbClient.queryObject(StorageSystem.class, sourceObj.getStorageController());
    // The requested copy mode rides along in the capabilities wrapper.
    VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
    capabilities.put(VirtualPoolCapabilityValuesWrapper.SNAPSHOT_SESSION_COPY_MODE, copyMode);
    // A single descriptor represents the snapshot session to be created.
    List<VolumeDescriptor> descriptors = new ArrayList<VolumeDescriptor>();
    descriptors.add(new VolumeDescriptor(VolumeDescriptor.Type.BLOCK_SNAPSHOT_SESSION, system.getId(), snapSessionURI, null, sourceObj.getConsistencyGroup(), capabilities, snapSessionSnapshotURIs));
    BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
    controller.createSnapshotSession(descriptors, taskId);
}
Use of com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController in the project coprhd-controller by CoprHD.
From the class DefaultBlockFullCopyApiImpl, method create.
/**
 * {@inheritDoc}
 */
@Override
public TaskList create(List<BlockObject> fcSourceObjList, VirtualArray varray, String name, boolean createInactive, int count, String taskId) {
    // Get the placement recommendations for the full copies and
    // prepare the ViPR volumes to represent the full copies.
    // TBD We are getting recommendations one at a time instead
    // of for all full copies at the same time as was done
    // previously. However, now we are allowing for creating
    // full copies for multiple volume form a CG. These volumes
    // could have different vpools and sizes. Therefore, I don't
    // see how we can get them at the same time for all volumes
    // as the capabilities could be different. I guess the
    // possible result is that if the volumes are the same, they
    // could be placed in the same storage pool and if the pool
    // is approaching capacity, there may not actually be enough
    // space in the recommended pool.
    // Counter appended to copy names to keep them unique across sources.
    int sourceCounter = 0;
    // All full-copy volumes prepared so far, across every source object.
    List<Volume> volumesList = new ArrayList<Volume>();
    // Last source processed; used below when building the task list.
    BlockObject aFCSource = null;
    // Cache varray lookups so each varray is fetched at most once.
    Map<URI, VirtualArray> vArrayCache = new HashMap<URI, VirtualArray>();
    List<BlockObject> sortedSourceObjectList = sortFullCopySourceList(fcSourceObjList);
    try {
        for (BlockObject fcSourceObj : sortedSourceObjectList) {
            // Make sure when there are multiple source objects,
            // each full copy has a unique name.
            aFCSource = fcSourceObj;
            // volumes in VolumeGroup can be from different vArrays
            varray = getVarrayFromCache(vArrayCache, fcSourceObj.getVirtualArray());
            String copyName = null;
            boolean inApplication = false;
            if (aFCSource instanceof Volume && ((Volume) aFCSource).getApplication(_dbClient) != null) {
                inApplication = true;
            }
            // For application volumes with a replication group, include the
            // group name in the copy name. The counter suffix is only added
            // when there is more than one source (note: ++sourceCounter in the
            // ternary is a deliberate side effect — it increments per source).
            if (NullColumnValueGetter.isNotNullValue(fcSourceObj.getReplicationGroupInstance()) && inApplication) {
                copyName = name + "-" + fcSourceObj.getReplicationGroupInstance() + (sortedSourceObjectList.size() > 1 ? "-" + ++sourceCounter : "");
            } else {
                copyName = name + (sortedSourceObjectList.size() > 1 ? "-" + ++sourceCounter : "");
            }
            VirtualPool vpool = BlockFullCopyUtils.queryFullCopySourceVPool(fcSourceObj, _dbClient);
            VirtualPoolCapabilityValuesWrapper capabilities = getCapabilitiesForFullCopyCreate(fcSourceObj, vpool, count);
            // Place and prepare `count` copies for this source.
            List<VolumeRecommendation> placementRecommendations = getPlacementRecommendations(fcSourceObj, capabilities, varray, vpool.getId());
            volumesList.addAll(prepareClonesForEachRecommendation(copyName, name, fcSourceObj, capabilities, createInactive, placementRecommendations));
        }
    } catch (Exception ex) {
        // Clean up any volumes already prepared before re-throwing.
        handlePlacementFailure(volumesList);
        throw ex;
    }
    // get volume descriptors
    List<VolumeDescriptor> volumeDescriptors = prepareVolumeDescriptorsForFullCopy(volumesList, createInactive);
    // get all tasks
    TaskList tasks = getTasksForCreateFullCopy(aFCSource, volumesList, taskId);
    try {
        BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
        controller.createFullCopy(volumeDescriptors, taskId);
    } catch (InternalException ie) {
        // Mark the tasks failed; the error is reflected in the returned tasks
        // rather than re-thrown.
        handleFailedRequest(taskId, tasks, volumesList, ie, true);
    }
    return tasks;
}
Use of com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController in the project coprhd-controller by CoprHD.
From the class VPlexBlockServiceApiImpl, method expandVolume.
/**
 * {@inheritDoc}
 */
@Override
public void expandVolume(Volume vplexVolume, long newSize, String taskId) throws InternalException {
    URI vplexSystemURI = vplexVolume.getStorageController();
    if (!isNativeVolumeExpansionSupported(vplexVolume, newSize)) {
        // Native expansion is not possible, so expand by migrating the data on
        // the current backend volume(s) to new volumes satisfying the new size.
        s_logger.info("VPLEX volume {} will be expanded by migration.", vplexVolume.getId());
        // The migration target volumes satisfying the new size.
        List<URI> migrationTargets = new ArrayList<URI>();
        // A migration for each new backend volume.
        Map<URI, URI> migrationsByTarget = new HashMap<URI, URI>();
        // The storage pool in which each new volume should be created.
        Map<URI, URI> poolsByVolume = new HashMap<URI, URI>();
        // Get the VPlex system.
        StorageSystem vplexSystem = _permissionsHelper.getObjectById(vplexSystemURI, StorageSystem.class);
        // Prepare a migration for each backend volume of the virtual volume.
        StringSet backendVolumeIds = vplexVolume.getAssociatedVolumes();
        if ((backendVolumeIds == null) || backendVolumeIds.isEmpty()) {
            s_logger.error("VPLEX volume {} has no backend volumes.", vplexVolume.forDisplay());
            throw InternalServerErrorException.internalServerErrors.noAssociatedVolumesForVPLEXVolume(vplexVolume.forDisplay());
        }
        for (String backendVolumeId : backendVolumeIds) {
            Volume backendVolume = _permissionsHelper.getObjectById(URI.create(backendVolumeId), Volume.class);
            VirtualArray backendVarray = _permissionsHelper.getObjectById(backendVolume.getVirtualArray(), VirtualArray.class);
            VirtualPool backendVpool = _permissionsHelper.getObjectById(backendVolume.getVirtualPool(), VirtualPool.class);
            prepareBackendVolumeForMigration(vplexSystem, vplexVolume, backendVolume, backendVarray, backendVpool, newSize, taskId, migrationTargets, migrationsByTarget, poolsByVolume);
        }
        // Use the VPlex controller to expand the passed VPlex virtual volume
        // by migrating the backend volume(s) to the migration target(s) of
        // the new size.
        VPlexController controller = getController();
        controller.expandVolumeUsingMigration(vplexSystemURI, vplexVolume.getId(), migrationTargets, migrationsByTarget, poolsByVolume, newSize, taskId);
    } else {
        // Expand the passed VPlex virtual volume by natively expanding the
        // backend volume(s).
        // TODO: At the moment, native expansion go via block orchestration controller. JIRA CTRL-5336 filed for this.
        // Expand via migration still follows the old way of doing things and this needs to be changed.
        s_logger.info("VPLEX volume {} will be expanded natively.", vplexVolume.getId());
        List<VolumeDescriptor> descriptors = createVolumeDescriptorsForNativeExpansion(Arrays.asList(vplexVolume.getId()));
        for (VolumeDescriptor descriptor : descriptors) {
            descriptor.setVolumeSize(newSize);
        }
        BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
        controller.expandVolume(descriptors, taskId);
    }
}
Aggregations