use of com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController in project coprhd-controller by CoprHD.
the class RPBlockServiceApiImpl method expandVolume.
/**
 * {@inheritDoc}
 */
@Override
public void expandVolume(Volume volume, long newSize, String taskId) throws InternalException {
    Long originalVolumeSize = volume.getCapacity();
    List<URI> replicationSetVolumes = RPHelper.getReplicationSetVolumes(volume, _dbClient);
    Map<URI, StorageSystem> volumeStorageSystems = new HashMap<URI, StorageSystem>();
    // Determine if any volume in the replication set is on VMAX; if so, pass the actual
    // provisioned capacity into the computeProtectionAllocation capacity calculation.
    for (URI rpVolumeURI : replicationSetVolumes) {
        Volume rpVolume = _dbClient.queryObject(Volume.class, rpVolumeURI);
        Volume vmaxVolume = null;
        StorageSystem vmaxStorageSystem = null;
        if (rpVolume.getAssociatedVolumes() != null && !rpVolume.getAssociatedVolumes().isEmpty()) {
            // Check backend volumes for VPLEX
            for (String backingVolumeStr : rpVolume.getAssociatedVolumes()) {
                Volume backingVolume = _dbClient.queryObject(Volume.class, URI.create(backingVolumeStr));
                StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, backingVolume.getStorageController());
                if (storageSystem.getSystemType().equalsIgnoreCase(DiscoveredDataObject.Type.vmax.toString())) {
                    vmaxVolume = backingVolume;
                    vmaxStorageSystem = storageSystem;
                    break;
                }
            }
        } else {
            StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, rpVolume.getStorageController());
            if (storageSystem.getSystemType().equalsIgnoreCase(DiscoveredDataObject.Type.vmax.toString())) {
                vmaxVolume = rpVolume;
                vmaxStorageSystem = storageSystem;
            }
        }
        if (vmaxVolume != null && vmaxStorageSystem != null) {
            // Set the requested size to what the VMAX meta utils determine will possibly be the provisioned capacity.
            // All the other volumes will need to be the same size as well, or else RP will have a fit.
            newSize = RPHelper.computeVmaxVolumeProvisionedCapacity(newSize, vmaxVolume, vmaxStorageSystem, _dbClient);
            _log.info(String.format("VMAX volume detected, expand size re-calculated to [%d]", newSize));
            // No need to continue, newSize has been calculated.
            break;
        }
    }
    try {
        List<Volume> allVolumesToUpdateCapacity = new ArrayList<Volume>();
        Map<URI, String> associatedVolumePersonalityMap = new HashMap<URI, String>();
        for (URI rpVolumeURI : replicationSetVolumes) {
            Volume rpVolume = _dbClient.queryObject(Volume.class, rpVolumeURI);
            if (rpVolume.getAssociatedVolumes() != null && !rpVolume.getAssociatedVolumes().isEmpty()) {
                // Check backend volumes for VPLEX
                for (String backingVolumeStr : rpVolume.getAssociatedVolumes()) {
                    Volume backingVolume = _dbClient.queryObject(Volume.class, URI.create(backingVolumeStr));
                    allVolumesToUpdateCapacity.add(backingVolume);
                    associatedVolumePersonalityMap.put(backingVolume.getId(), rpVolume.getPersonality());
                    addVolumeStorageSystem(volumeStorageSystems, backingVolume);
                }
            } else {
                allVolumesToUpdateCapacity.add(rpVolume);
                addVolumeStorageSystem(volumeStorageSystems, rpVolume);
            }
        }
        if (!capacitiesCanMatch(volumeStorageSystems)) {
            Map<Volume.PersonalityTypes, Long> capacities = setUnMatchedCapacities(allVolumesToUpdateCapacity, associatedVolumePersonalityMap, true, newSize);
            _log.info("Capacities for source and target of the Volume Expand request cannot match due to the differences in array types");
            _log.info("Expand Volume requested size : {}", newSize);
            _log.info("Expand source calculated size : {}", capacities.get(Volume.PersonalityTypes.SOURCE));
            _log.info("Expand target calculated size : {}", capacities.get(Volume.PersonalityTypes.TARGET));
            List<VolumeDescriptor> volumeDescriptors = createVolumeDescriptors(null, replicationSetVolumes, null, null, null);
            for (VolumeDescriptor volDesc : volumeDescriptors) {
                if (volDesc.getType() == VolumeDescriptor.Type.RP_VPLEX_VIRT_SOURCE) {
                    volDesc.setVolumeSize(capacities.get(Volume.PersonalityTypes.SOURCE));
                } else if ((volDesc.getType() == VolumeDescriptor.Type.RP_TARGET) || (volDesc.getType() == VolumeDescriptor.Type.RP_VPLEX_VIRT_TARGET)) {
                    volDesc.setVolumeSize(capacities.get(Volume.PersonalityTypes.TARGET));
                }
            }
            BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
            controller.expandVolume(volumeDescriptors, taskId);
        } else {
            // Step 2: Just because we have an RP source/target on VMAX and have a size calculated doesn't mean VNX can honor it.
            // The trick is that the size of the volume must be a multiple of 512 for VNX and 520 for VMAX because of the different
            // block sizes.
            // We will find a number (in bytes) that is greater than the requested size and meets the above criteria, and use that as
            // our final expanded volume size.
            long normalizedRequestSize = computeProtectionCapacity(replicationSetVolumes, newSize, true, false, null);
            // Step 3: Call the controller to do the expand.
            _log.info("Expand volume request size : {}", normalizedRequestSize);
            List<VolumeDescriptor> volumeDescriptors = createVolumeDescriptors(null, replicationSetVolumes, null, null, null);
            for (VolumeDescriptor volDesc : volumeDescriptors) {
                volDesc.setVolumeSize(normalizedRequestSize);
            }
            BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
            controller.expandVolume(volumeDescriptors, taskId);
        }
    } catch (ControllerException e) {
        // Set the volume size back to the original size before the expand request
        for (URI volumeURI : replicationSetVolumes) {
            Volume rpVolume = _dbClient.queryObject(Volume.class, volumeURI);
            rpVolume.setCapacity(originalVolumeSize);
            _dbClient.updateObject(rpVolume);
            // Reset the backing volumes as well, if they exist
            if (rpVolume.getAssociatedVolumes() != null && !rpVolume.getAssociatedVolumes().isEmpty()) {
                // Check backend volumes for VPLEX
                for (String backingVolumeStr : rpVolume.getAssociatedVolumes()) {
                    Volume backingVolume = _dbClient.queryObject(Volume.class, URI.create(backingVolumeStr));
                    backingVolume.setCapacity(originalVolumeSize);
                    _dbClient.updateObject(backingVolume);
                }
            }
        }
        throw APIException.badRequests.volumeNotExpandable(volume.getLabel());
    }
}
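The comment in the VMAX/VNX branch above is the key sizing constraint: VNX carves volumes in 512-byte blocks while VMAX uses 520-byte blocks, so the final expanded size must land on a boundary both arrays can honor. As a rough illustration of that rounding step only (not the actual computeProtectionCapacity implementation, which also accounts for array combinations and meta-volume geometry), a requested size can be normalized by rounding up to the least common multiple of the two block sizes:

// Illustrative sketch only: round a requested size (in bytes) up to a boundary
// that both a 512-byte-block array (VNX) and a 520-byte-block array (VMAX) can honor.
public final class BlockSizeAlignment {

    private static final long VNX_BLOCK_SIZE = 512L;
    private static final long VMAX_BLOCK_SIZE = 520L;

    private BlockSizeAlignment() {
    }

    /** Smallest size >= requestedSize that is a multiple of both block sizes. */
    public static long normalize(long requestedSize) {
        long lcm = lcm(VNX_BLOCK_SIZE, VMAX_BLOCK_SIZE); // 33,280 bytes
        return ((requestedSize + lcm - 1) / lcm) * lcm;
    }

    private static long gcd(long a, long b) {
        return (b == 0) ? a : gcd(b, a % b);
    }

    private static long lcm(long a, long b) {
        return (a / gcd(a, b)) * b;
    }

    public static void main(String[] args) {
        // A 10 GiB request rounds up to the next common boundary of 512 and 520 bytes.
        System.out.println(normalize(10L * 1024 * 1024 * 1024));
    }
}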
use of com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController in project coprhd-controller by CoprHD.
the class RPBlockServiceApiImpl method createVolumes.
@Override
public TaskList createVolumes(VolumeCreate param, Project project, VirtualArray varray, VirtualPool vpool, Map<VpoolUse, List<Recommendation>> recommendationMap, TaskList taskList, String task, VirtualPoolCapabilityValuesWrapper capabilities) throws InternalException {
    List<Recommendation> recommendations = recommendationMap.get(VpoolUse.ROOT);
    // List of volumes to be prepared
    List<URI> volumeURIs = new ArrayList<URI>();
    // Volume label from the param
    String volumeLabel = param.getName();
    // List to store the volume descriptors for the Block Orchestration
    List<VolumeDescriptor> volumeDescriptors = new ArrayList<VolumeDescriptor>();
    // Store capabilities of the CG, so they make it down to the controller
    if (vpool.getRpCopyMode() != null) {
        capabilities.put(VirtualPoolCapabilityValuesWrapper.RP_COPY_MODE, vpool.getRpCopyMode());
    }
    if (vpool.getRpRpoType() != null && NullColumnValueGetter.isNotNullValue(vpool.getRpRpoType())) {
        capabilities.put(VirtualPoolCapabilityValuesWrapper.RP_RPO_TYPE, vpool.getRpRpoType());
    }
    if (vpool.checkRpRpoValueSet()) {
        capabilities.put(VirtualPoolCapabilityValuesWrapper.RP_RPO_VALUE, vpool.getRpRpoValue());
    }
    // Get the first recommendation; we need to figure out if this is a change vpool
    RPProtectionRecommendation rpProtectionRec = (RPProtectionRecommendation) recommendations.get(0);
    boolean isChangeVpool = (rpProtectionRec.getVpoolChangeVolume() != null);
    boolean isChangeVpoolForProtectedVolume = rpProtectionRec.isVpoolChangeProtectionAlreadyExists();
    // For change vpool, save off the original source volume's vpool in case we need to roll back
    URI oldVpoolId = null;
    if (isChangeVpool || isChangeVpoolForProtectedVolume) {
        Volume changeVpoolVolume = _dbClient.queryObject(Volume.class, rpProtectionRec.getVpoolChangeVolume());
        oldVpoolId = changeVpoolVolume.getVirtualPool();
    }
    try {
        // Prepare the volumes
        prepareRecommendedVolumes(param, task, taskList, project, varray, vpool, capabilities.getResourceCount(), recommendations, volumeLabel, capabilities, volumeDescriptors, volumeURIs);
        // Execute the volume creation requests for each recommendation.
        Iterator<Recommendation> recommendationsIter = recommendations.iterator();
        while (recommendationsIter.hasNext()) {
            RPProtectionRecommendation recommendation = (RPProtectionRecommendation) recommendationsIter.next();
            volumeDescriptors.addAll(createVolumeDescriptors(recommendation, volumeURIs, capabilities, oldVpoolId, param.getComputeResource()));
            logDescriptors(volumeDescriptors);
            BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
            // TODO might be able to use param.getSize() instead of the below code to find requestedVolumeCapacity
            Long requestedVolumeCapacity = 0L;
            for (URI volumeURI : volumeURIs) {
                Volume volume = _dbClient.queryObject(Volume.class, volumeURI);
                if (Volume.PersonalityTypes.SOURCE.name().equalsIgnoreCase(volume.getPersonality())) {
                    requestedVolumeCapacity = volume.getCapacity();
                    break;
                }
            }
            computeProtectionCapacity(volumeURIs, requestedVolumeCapacity, false, isChangeVpool, null);
            if (isChangeVpool) {
                _log.info("Add RecoverPoint protection to existing volume");
                controller.changeVirtualPool(volumeDescriptors, task);
            } else {
                _log.info("Create RP volumes");
                controller.createVolumes(volumeDescriptors, task);
            }
        }
    } catch (Exception e) {
        _log.error(e.getMessage(), e);
        try {
            // We want to return the volume back to its original state.
            if (isChangeVpool || isChangeVpoolForProtectedVolume) {
                Volume changeVpoolVolume = _dbClient.queryObject(Volume.class, rpProtectionRec.getVpoolChangeVolume());
                VirtualPool oldVpool = _dbClient.queryObject(VirtualPool.class, oldVpoolId);
                RPHelper.rollbackProtectionOnVolume(changeVpoolVolume, oldVpool, _dbClient);
            }
            for (URI volumeURI : volumeURIs) {
                // Don't completely roll back an existing volume (which the change vpool volume would be).
                if (!volumeURI.equals(rpProtectionRec.getVpoolChangeVolume())) {
                    RPHelper.rollbackVolume(volumeURI, _dbClient);
                }
            }
        } catch (Exception e2) {
            // Best effort for rollback; still need to set the tasks to error
            _log.error("rollback create volume or change vpool failed");
            _log.error(e2.getMessage(), e2);
        }
        // Let's check to see if there are existing tasks; if so, put them in error.
        if (taskList.getTaskList() != null && !taskList.getTaskList().isEmpty()) {
            for (TaskResourceRep volumeTask : taskList.getTaskList()) {
                volumeTask.setState(Operation.Status.error.name());
                volumeTask.setMessage(e.getMessage());
                Operation statusUpdate = new Operation(Operation.Status.error.name(), e.getMessage());
                _dbClient.updateTaskOpStatus(Volume.class, volumeTask.getResource().getId(), task, statusUpdate);
            }
        }
        throw APIException.badRequests.rpBlockApiImplPrepareVolumeException(volumeLabel);
    }
    return taskList;
}
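Every call site on this page resolves the orchestration controller the same way, via getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE). CoprHD wires this up through its own controller service infrastructure; purely as a conceptual sketch (the class and method bodies below are hypothetical and not CoprHD APIs), the lookup amounts to a type-plus-device-keyed registry:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical illustration of a type-keyed controller lookup. CoprHD's real getController()
// resolves the implementation through its controller service layer, not a local map like this.
final class ControllerRegistry {

    private final Map<String, Object> controllers = new ConcurrentHashMap<>();

    <T> void register(Class<T> clazz, String deviceType, T impl) {
        controllers.put(key(clazz, deviceType), impl);
    }

    <T> T getController(Class<T> clazz, String deviceType) {
        Object impl = controllers.get(key(clazz, deviceType));
        if (impl == null) {
            throw new IllegalStateException("No controller registered for " + key(clazz, deviceType));
        }
        return clazz.cast(impl);
    }

    private static String key(Class<?> clazz, String deviceType) {
        return clazz.getName() + "/" + deviceType;
    }
}

A service API implementation would then obtain the block orchestrator with getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE) and hand it the assembled volume descriptors, exactly as the excerpts above and below do.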
use of com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController in project coprhd-controller by CoprHD.
the class SRDFBlockServiceApiImpl method changeVolumeVirtualPool.
@Override
public TaskList changeVolumeVirtualPool(List<Volume> volumes, VirtualPool vpool, VirtualPoolChangeParam vpoolChangeParam, String taskId) throws InternalException {
    TaskList taskList = createTasksForVolumes(vpool, volumes, taskId);
    // Check for common vpool updates handled by generic code. It returns true if handled.
    if (checkCommonVpoolUpdates(volumes, vpool, taskId)) {
        return taskList;
    }
    // TODO Modified the code for COP-20817. Need to revisit this code flow post-release.
    // Run the placement algorithm and collect all volume descriptors to create SRDF target volumes on the array.
    List<VolumeDescriptor> volumeDescriptorsList = new ArrayList<>();
    for (Volume volume : volumes) {
        // Check if the volume is a normal volume without a CG but the new vpool has CG enabled.
        if (NullColumnValueGetter.isNullURI(volume.getConsistencyGroup()) && (null != vpool.getMultivolumeConsistency() && vpool.getMultivolumeConsistency())) {
            _log.info("VPool change is not permitted as the volume is not part of a CG but the new VPool is consistency enabled.");
            throw APIException.badRequests.changeToVirtualPoolNotSupportedForNonCGVolume(volume.getId(), vpool.getLabel());
        }
        if (!NullColumnValueGetter.isNullNamedURI(volume.getSrdfParent()) || (volume.getSrdfTargets() != null && !volume.getSrdfTargets().isEmpty())) {
            throw APIException.badRequests.srdfVolumeVPoolChangeNotSupported(volume.getId());
        }
        // Get the storage system.
        StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, volume.getStorageController());
        String systemType = storageSystem.getSystemType();
        if (DiscoveredDataObject.Type.vmax.name().equals(systemType)) {
            _log.debug("SRDF Protection VirtualPool change for vmax volume.");
            volumeDescriptorsList.addAll(upgradeToSRDFTargetVolume(volume, vpool, vpoolChangeParam, taskId));
        } else {
            // Not a VMAX volume
            throw APIException.badRequests.srdfVolumeVPoolChangeNotSupported(volume.getId());
        }
    }
    // CG volume (SRDF target) creation should execute as a single operation.
    // Otherwise, the CreateGroupReplica method that creates the SRDF pair between the source and target group
    // will have a count mismatch problem.
    BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
    controller.createVolumes(volumeDescriptorsList, taskId);
    _log.info("Change virtual pool steps have been successfully initiated");
    return taskList;
}
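The batching comment above is the important constraint: the SRDF pairing step compares the membership of the source and target groups, so dispatching one createVolumes call per volume would present partial target groups and fail. The toy model below (not CoprHD code; createGroupReplica here is a stand-in predicate, and the volume names are invented) just demonstrates why the counts only line up when all descriptors are submitted in a single operation:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Toy model of the count-mismatch concern: the group pairing step expects the target
// group to have as many members as the source consistency group.
public class SrdfBatchingDemo {

    // Stand-in for the pairing step: succeeds only when source and target counts match.
    static boolean createGroupReplica(int sourceGroupSize, List<String> targetGroup) {
        return targetGroup.size() == sourceGroupSize;
    }

    public static void main(String[] args) {
        List<String> sourceVolumes = Arrays.asList("vol-1", "vol-2", "vol-3");

        // Per-volume dispatch: each call sees a single target against a 3-member source CG and fails.
        for (String source : sourceVolumes) {
            boolean ok = createGroupReplica(sourceVolumes.size(), Arrays.asList(source + "-target"));
            System.out.println("per-volume pairing for " + source + ": " + ok); // always false
        }

        // Batched dispatch: one call carries all targets, so the counts line up.
        List<String> targets = new ArrayList<>();
        for (String source : sourceVolumes) {
            targets.add(source + "-target");
        }
        System.out.println("batched pairing: " + createGroupReplica(sourceVolumes.size(), targets)); // true
    }
}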
use of com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController in project coprhd-controller by CoprHD.
the class SRDFBlockServiceApiImpl method expandVolume.
/**
 * {@inheritDoc}
 *
 * @throws ControllerException
 */
@Override
public void expandVolume(final Volume volume, final long newSize, final String taskId) throws ControllerException {
    if (PersonalityTypes.TARGET.toString().equalsIgnoreCase(volume.getPersonality())) {
        throw APIException.badRequests.expandSupportedOnlyOnSource(volume.getId());
    }
    List<VolumeDescriptor> descriptors = getVolumeDescriptorsForExpandVolume(volume, newSize);
    BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
    // TODO : close JIRA CTRL-5335 SRDF expand needs to go via BlockOrchestrationController.
    controller.expandVolume(descriptors, taskId);
}
use of com.emc.storageos.blockorchestrationcontroller.BlockOrchestrationController in project coprhd-controller by CoprHD.
the class AbstractBlockServiceApiImpl method deleteVolumes.
/**
 * {@inheritDoc}
 *
 * @throws InternalException
 */
@Override
public void deleteVolumes(URI systemURI, List<URI> volumeURIs, String deletionType, String task) throws InternalException {
    // Get volume descriptors for all volumes to be deleted.
    List<VolumeDescriptor> volumeDescriptors = getDescriptorsForVolumesToBeDeleted(systemURI, volumeURIs, deletionType);
    // For a ViPR-only delete, clean up the database; otherwise, call the controller and delete the volumes.
    if (VolumeDeleteTypeEnum.VIPR_ONLY.name().equals(deletionType)) {
        // Do any cleanup necessary for the ViPR only delete.
        cleanupForViPROnlyDelete(volumeDescriptors);
        // Mark them inactive. Note that some of the volumes may be mirrors,
        // which have a different database type.
        List<VolumeDescriptor> descriptorsForMirrors = VolumeDescriptor.getDescriptors(volumeDescriptors, VolumeDescriptor.Type.BLOCK_MIRROR);
        _dbClient.markForDeletion(_dbClient.queryObject(BlockMirror.class, VolumeDescriptor.getVolumeURIs(descriptorsForMirrors)));
        List<VolumeDescriptor> descriptorsForVolumes = VolumeDescriptor.filterByType(volumeDescriptors, null, new VolumeDescriptor.Type[] { VolumeDescriptor.Type.BLOCK_MIRROR });
        _dbClient.markForDeletion(_dbClient.queryObject(Volume.class, VolumeDescriptor.getVolumeURIs(descriptorsForVolumes)));
        // Delete the corresponding FCZoneReferences
        for (URI volumeURI : volumeURIs) {
            List<FCZoneReference> zoneReferences = CustomQueryUtility.queryActiveResourcesByAltId(_dbClient, FCZoneReference.class, "volumeUri", volumeURI.toString());
            for (FCZoneReference zoneReference : zoneReferences) {
                if (zoneReference != null) {
                    _dbClient.markForDeletion(zoneReference);
                }
            }
        }
        // Update the task status for each volume
        for (URI volumeURI : volumeURIs) {
            Volume volume = _dbClient.queryObject(Volume.class, volumeURI);
            Operation op = volume.getOpStatus().get(task);
            op.ready("Volume successfully deleted from ViPR");
            volume.getOpStatus().updateTaskStatus(task, op);
            _dbClient.updateObject(volume);
        }
    } else {
        BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
        controller.deleteVolumes(volumeDescriptors, task);
    }
}
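The ViPR-only branch above splits the descriptors into BLOCK_MIRROR entries and everything else using the VolumeDescriptor helper methods. As an illustrative alternative only (a sketch relying solely on the getType() accessor already used earlier on this page), the same split can be expressed as a stream partition:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

// Sketch only: a stream-based equivalent of the mirror/volume split above.
final class DescriptorSplitter {

    static Map<Boolean, List<VolumeDescriptor>> splitOutMirrors(List<VolumeDescriptor> descriptors) {
        return descriptors.stream()
                .collect(Collectors.partitioningBy(
                        desc -> desc.getType() == VolumeDescriptor.Type.BLOCK_MIRROR));
    }
}

Here splitOutMirrors(volumeDescriptors).get(true) corresponds to descriptorsForMirrors and .get(false) to descriptorsForVolumes in the method above.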