Use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.
From class VPlexDeviceController, method prepareVolumeDescriptor.
/**
* Create a volume instance and VolumeDescriptor using the characteristics of the passed in source volume.
*
* @param source
* - The volume whose characteristics are used to create the new volume instance
* @param name
* - The new volume label
* @param type
* - VolumeDescriptor type
* @param size
* - The volume size
* @param isInternal
* - If true, the new volume is flagged as an internal object
* @return - The newly created VolumeDescriptor
*/
private VolumeDescriptor prepareVolumeDescriptor(Volume source, String name, VolumeDescriptor.Type type, long size, boolean isInternal) {
    Volume volume = new Volume();
    volume.setId(URIUtil.createId(Volume.class));
    volume.setLabel(name);
    volume.setCapacity(size);
    URI vpoolUri = source.getVirtualPool();
    VirtualPool vpool = getDataObject(VirtualPool.class, vpoolUri, _dbClient);
    volume.setThinlyProvisioned(VirtualPool.ProvisioningType.Thin.toString().equalsIgnoreCase(vpool.getSupportedProvisioningType()));
    volume.setVirtualPool(vpool.getId());
    URI projectId = source.getProject().getURI();
    Project project = getDataObject(Project.class, projectId, _dbClient);
    volume.setProject(new NamedURI(projectId, volume.getLabel()));
    volume.setTenant(new NamedURI(project.getTenantOrg().getURI(), volume.getLabel()));
    volume.setVirtualArray(source.getVirtualArray());
    volume.setPool(source.getPool());
    volume.setProtocol(source.getProtocol());
    volume.setStorageController(source.getStorageController());
    volume.setSystemType(source.getSystemType());
    if (isInternal) {
        volume.addInternalFlags(Flag.INTERNAL_OBJECT);
    }
    _dbClient.createObject(volume);
    VirtualPoolCapabilityValuesWrapper capabilities = getCapabilities(source, size);
    return new VolumeDescriptor(type, volume.getStorageController(), volume.getId(), volume.getPool(), capabilities);
}
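A minimal call-site sketch for reference (hypothetical: prepareVolumeDescriptor is private to VPlexDeviceController, and the label, descriptor type, size, and the sourceVolumeUri / descriptors variables below are illustrative, not taken from the project):

    // Sketch: build a descriptor for an internal backend copy of an existing volume.
    Volume sourceVolume = getDataObject(Volume.class, sourceVolumeUri, _dbClient); // sourceVolumeUri is assumed
    VolumeDescriptor copyDescriptor = prepareVolumeDescriptor(
            sourceVolume,
            sourceVolume.getLabel() + "-copy",   // illustrative label for the new volume
            VolumeDescriptor.Type.BLOCK_DATA,    // descriptor type chosen for this sketch
            sourceVolume.getCapacity(),          // same size as the source
            true);                               // flag the new Volume as INTERNAL_OBJECT
    descriptors.add(copyDescriptor);             // descriptors is an assumed List<VolumeDescriptor>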
Use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.
From class VPlexDeviceController, method buildVolumeMap.
/**
* Build a map of URI to cached Volumes for the underlying Storage Volumes that
* should be already present (and created).
*
* @param descriptors
* List of VolumeDescriptors to examine
* @param type
* VolumeDescriptor.Type used to filter the descriptors (may be null to skip filtering)
* @return Map<volumeURI, Volume>
*/
private Map<URI, Volume> buildVolumeMap(List<VolumeDescriptor> descriptors, VolumeDescriptor.Type type) {
    Map<URI, Volume> volumeMap = new HashMap<URI, Volume>();
    // Get only the descriptors for the type if specified.
    if (type != null) {
        descriptors = VolumeDescriptor.filterByType(descriptors, new VolumeDescriptor.Type[] { type }, new VolumeDescriptor.Type[] {});
    }
    for (VolumeDescriptor desc : descriptors) {
        if (volumeMap.containsKey(desc.getVolumeURI()) == false) {
            Volume volume = getDataObject(Volume.class, desc.getVolumeURI(), _dbClient);
            volumeMap.put(desc.getVolumeURI(), volume);
        }
    }
    return volumeMap;
}
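A minimal usage sketch (hypothetical call site: buildVolumeMap is private, and volumeDescriptors is assumed to be the descriptor list handed in by the orchestration layer):

    // Cache the VPLEX virtual volumes referenced by the descriptors so later steps
    // can look them up by URI without re-querying the database for each one.
    Map<URI, Volume> vplexVolumesByUri = buildVolumeMap(volumeDescriptors, VolumeDescriptor.Type.VPLEX_VIRT_VOLUME);
    for (Map.Entry<URI, Volume> entry : vplexVolumesByUri.entrySet()) {
        _log.info("Descriptor references volume {} ({})", entry.getValue().getLabel(), entry.getKey());
    }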
Use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.
From class VPlexDeviceController, method addStepsForCreateVolumes.
/**
* {@inheritDoc}
* <p>
* Here we should have already created any underlying volumes. What remains to be done:
* 1. Export the underlying Storage Volumes from the array to the VPlex.
* 2. Create the Virtual volume.
* 3. If a consistency group was specified, then create the consistency group if it does not exist, then add the volumes.
* If it already exists, just add the volumes.
*/
@Override
public String addStepsForCreateVolumes(Workflow workflow, String waitFor, List<VolumeDescriptor> volumes, String taskId) throws ControllerException {
    try {
        // Get only the VPlex volumes from the descriptors.
        List<VolumeDescriptor> vplexVolumes = VolumeDescriptor.filterByType(volumes,
                new VolumeDescriptor.Type[] { VolumeDescriptor.Type.VPLEX_VIRT_VOLUME }, new VolumeDescriptor.Type[] {});
        // If there are no VPlex volumes, just return
        if (vplexVolumes.isEmpty()) {
            _log.info("No VPLEX create volume steps required.");
            return waitFor;
        }
        _log.info("Adding VPLEX create volume steps...");
        // Segregate the volumes by Device.
        Map<URI, List<VolumeDescriptor>> vplexDescMap = VolumeDescriptor.getDeviceMap(vplexVolumes);
        // For each VPLEX to be provisioned (normally there is only one)
        String lastStep = waitFor;
        for (URI vplexURI : vplexDescMap.keySet()) {
            StorageSystem vplexSystem = getDataObject(StorageSystem.class, vplexURI, _dbClient);
            // Build some needed maps to get started.
            Type[] types = new Type[] { Type.BLOCK_DATA, Type.SRDF_SOURCE, Type.SRDF_EXISTING_SOURCE, Type.SRDF_TARGET };
            Map<URI, StorageSystem> arrayMap = buildArrayMap(vplexSystem, volumes, types);
            Map<URI, Volume> volumeMap = buildVolumeMap(vplexSystem, volumes, Type.VPLEX_VIRT_VOLUME);
            // Set the project and tenant to those of an underlying volume.
            // These are used to set the project and tenant of a new ExportGroup if needed.
            Volume firstVolume = volumeMap.values().iterator().next();
            Project vplexProject = VPlexUtil.lookupVplexProject(firstVolume, vplexSystem, _dbClient);
            URI tenantURI = vplexProject.getTenantOrg().getURI();
            _log.info("Project is {}, Tenant is {}", vplexProject.getId(), tenantURI);
            try {
                // Now we need to do the necessary zoning and export steps to ensure
                // the VPlex can see these new backend volumes.
                lastStep = createWorkflowStepsForBlockVolumeExport(workflow, vplexSystem, arrayMap, volumeMap,
                        vplexProject.getId(), tenantURI, lastStep);
            } catch (Exception ex) {
                _log.error("Could not create volumes for vplex: " + vplexURI, ex);
                TaskCompleter completer = new VPlexTaskCompleter(Volume.class, vplexURI, taskId, null);
                ServiceError serviceError = VPlexApiException.errors.jobFailed(ex);
                completer.error(_dbClient, serviceError);
                throw ex;
            }
            Map<URI, URI> computeResourceMap = new HashMap<>();
            List<VolumeDescriptor> vplexDescrs = vplexDescMap.get(vplexURI);
            for (VolumeDescriptor descr : vplexDescrs) {
                URI computeResourceURI = descr.getComputeResource();
                if (computeResourceURI != null) {
                    computeResourceMap.put(descr.getVolumeURI(), computeResourceURI);
                }
            }
            // Now create each of the Virtual Volumes that may be necessary.
            List<URI> vplexVolumeURIs = VolumeDescriptor.getVolumeURIs(vplexDescrs);
            // Now make a Step to create the VPlex Virtual volume.
            // This will be done from this controller.
            String stepId = workflow.createStepId();
            lastStep = workflow.createStep(VPLEX_STEP,
                    String.format("VPlex %s creating virtual volumes:%n%s", vplexSystem.getId().toString(),
                            BlockDeviceController.getVolumesMsg(_dbClient, vplexVolumeURIs)),
                    lastStep, vplexURI, vplexSystem.getSystemType(), this.getClass(),
                    createVirtualVolumesMethod(vplexURI, vplexVolumeURIs, computeResourceMap),
                    rollbackCreateVirtualVolumesMethod(vplexURI, vplexVolumeURIs, stepId), stepId);
            // Get one of the vplex volumes so we can determine what ConsistencyGroupManager
            // implementation to use.
            Volume vol = getDataObject(Volume.class, vplexVolumeURIs.get(0), _dbClient);
            ConsistencyGroupManager consistencyGroupManager = getConsistencyGroupManager(vol);
            // Deal with CGs.
            // Filter out any VPlex Volumes that front the SRDF targets for now.
            List<URI> volsForCG = VPlexSrdfUtil.filterOutVplexSrdfTargets(_dbClient, vplexVolumeURIs);
            if (!volsForCG.isEmpty()) {
                lastStep = consistencyGroupManager.addStepsForCreateConsistencyGroup(workflow, lastStep, vplexSystem, volsForCG, false);
            }
            // If there are VPlex Volumes fronting SRDF targets, handle them.
            // They will go into a separate CG that represents the SRDF targets.
            // That CG will have already been generated?
            volsForCG = VPlexSrdfUtil.returnVplexSrdfTargets(_dbClient, vplexVolumeURIs);
            if (!volsForCG.isEmpty()) {
                lastStep = consistencyGroupManager.addStepsForAddingVolumesToSRDFTargetCG(workflow, vplexSystem, volsForCG, lastStep);
            }
            _log.info("Added steps for creating consistency group");
        }
        return lastStep;
    } catch (Exception ex) {
        throw VPlexApiException.exceptions.addStepsForCreateVolumesFailed(ex);
    }
}
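For context, the waitFor/lastStep token returned here is how an orchestration layer chains step groups from several controllers into one workflow. A rough sketch of that calling pattern (hypothetical: the controller fields, completer, success message, and the executePlan call shape are assumptions for illustration; the actual orchestration code in coprhd-controller may differ):

    // Each addStepsForCreateVolumes call appends its steps to the workflow and
    // returns the step group that the next controller's steps should wait on.
    String waitFor = null; // nothing to wait on at the start of the workflow
    waitFor = blockDeviceController.addStepsForCreateVolumes(workflow, waitFor, volumeDescriptors, taskId);
    waitFor = vplexDeviceController.addStepsForCreateVolumes(workflow, waitFor, volumeDescriptors, taskId);
    // ... further controllers (replicas, protection, etc.) would chain in the same way.
    workflow.executePlan(completer, "Create volumes successful"); // assumed executePlan signature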
Use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.
From class VPlexDeviceController, method addStepsForDeleteVolumes.
/*
* (non-Javadoc)
*
* @see com.emc.storageos.volumecontroller.impl.vplex.VplexController#deleteVolumes(java.net.URI, java.util.List,
* java.lang.String)
* <p>
* NOTE: The VolumeDescriptor list will not include the underlying Volumes. These have to be
* added to the VolumeDescriptor list before returning.
*/
@Override
public String addStepsForDeleteVolumes(Workflow workflow, String waitFor, List<VolumeDescriptor> volumes, String taskId) throws ControllerException {
    try {
        // Filter to get only the VPlex volumes.
        List<VolumeDescriptor> vplexVolumes = VolumeDescriptor.filterByType(volumes,
                new VolumeDescriptor.Type[] { VolumeDescriptor.Type.VPLEX_VIRT_VOLUME }, new VolumeDescriptor.Type[] {});
        // If there are no VPlex volumes, just return
        if (vplexVolumes.isEmpty()) {
            return waitFor;
        }
        // Check to see if there are any volumes flagged to not be fully deleted.
        // This will still remove the volume from its VPLEX CG and also clean up
        // any Mirrors but will leave the Virtual Volume intact on the VPLEX.
        List<VolumeDescriptor> doNotDeleteDescriptors = VolumeDescriptor.getDoNotDeleteDescriptors(vplexVolumes);
        List<URI> doNotFullyDeleteVolumeList = VolumeDescriptor.getVolumeURIs(doNotDeleteDescriptors);
        List<URI> allVplexVolumeURIs = VolumeDescriptor.getVolumeURIs(vplexVolumes);
        // Segregate by device.
        Map<URI, List<VolumeDescriptor>> vplexMap = VolumeDescriptor.getDeviceMap(vplexVolumes);
        // A delete step is created below for each VPLEX system,
        // but subsequent steps will wait on all the delete virtual volumes operations to complete.
        for (URI vplexURI : vplexMap.keySet()) {
            String nextStepWaitFor = waitFor;
            StorageSystem vplexSystem = getDataObject(StorageSystem.class, vplexURI, _dbClient);
            // First validate that the backend volumes for these VPLEX volumes are
            // the actual volumes used by the VPLEX volume on the VPLEX system. We
            // add this verification in case changes were made outside ViPR, such
            // as a migration, that caused the backend volumes to change. We don't
            // want to delete a backend volume that may in fact be used by some other
            // VPLEX volume.
            List<URI> vplexVolumeURIs = VolumeDescriptor.getVolumeURIs(vplexMap.get(vplexURI));
            for (URI vplexVolumeURI : vplexVolumeURIs) {
                Volume vplexVolume = _dbClient.queryObject(Volume.class, vplexVolumeURI);
                if (vplexVolume == null || vplexVolume.getInactive() == true) {
                    continue;
                }
                // Skip validation if the volume was never successfully created.
                if (vplexVolume.getDeviceLabel() == null) {
                    _log.info("Volume {} with Id {} was never created on the VPLEX as device label is null "
                            + "hence skip validation on delete", vplexVolume.getLabel(), vplexVolume.getId());
                    continue;
                }
                // Also skip validation if the virtual volume is already gone on the VPLEX,
                // for example when a prior delete attempt removed it but the
                // backend volume deletion failed.
                try {
                    VPlexApiClient client = getVPlexAPIClient(_vplexApiFactory, vplexSystem, _dbClient);
                    client.findVirtualVolume(vplexVolume.getDeviceLabel(), vplexVolume.getNativeId());
                } catch (VPlexApiException ex) {
                    if (ex.getServiceCode() == ServiceCode.VPLEX_CANT_FIND_REQUESTED_VOLUME) {
                        _log.info("VPlex virtual volume: " + vplexVolume.getNativeId() + " has already been deleted; will skip validation");
                        continue;
                    } else {
                        _log.error("Exception finding Virtual Volume", ex);
                        throw ex;
                    }
                }
                createWorkflowStepToValidateVPlexVolume(workflow, vplexSystem, vplexVolumeURI, waitFor);
                nextStepWaitFor = VALIDATE_VPLEX_VOLUME_STEP;
            }
            // If there are VPlex Volumes fronting SRDF targets, handle them.
            // They will need to be removed from the CG that represents the SRDF targets.
            List<URI> volsForTargetCG = VPlexSrdfUtil.returnVplexSrdfTargets(_dbClient, vplexVolumeURIs);
            if (!volsForTargetCG.isEmpty()) {
                URI volURI = volsForTargetCG.get(0);
                Volume vol = VPlexControllerUtils.getDataObject(Volume.class, volURI, _dbClient);
                ConsistencyGroupManager consistencyGroupManager = getConsistencyGroupManager(vol);
                nextStepWaitFor = consistencyGroupManager.addStepsForRemovingVolumesFromSRDFTargetCG(workflow, vplexSystem, volsForTargetCG, nextStepWaitFor);
            }
            workflow.createStep(VPLEX_STEP,
                    String.format("Delete VPlex Virtual Volumes:%n%s", BlockDeviceController.getVolumesMsg(_dbClient, vplexVolumeURIs)),
                    nextStepWaitFor, vplexURI, DiscoveredDataObject.Type.vplex.name(), this.getClass(),
                    deleteVirtualVolumesMethod(vplexURI, vplexVolumeURIs, doNotFullyDeleteVolumeList),
                    rollbackMethodNullMethod(), null);
        }
        // Make a Map of array URI to StorageSystem
        Map<URI, StorageSystem> arrayMap = new HashMap<URI, StorageSystem>();
        // Make a Map of StorageSystem to a list of Volume URIs to be deleted for the next Step.
        Map<URI, List<URI>> arrayVolumesMap = new HashMap<URI, List<URI>>();
        // Make a list of ExportGroups that is used.
        List<URI> exportGroupList = new ArrayList<URI>();
        // Create a series of steps to remove the volume from the Export Groups.
        // We leave the Export Groups, anticipating they will be used for other
        // volumes or used later.
        List<URI> backendVolURIs = new ArrayList<URI>();
        for (URI vplexVolumeURI : allVplexVolumeURIs) {
            Volume vplexVolume = _dbClient.queryObject(Volume.class, vplexVolumeURI);
            if ((vplexVolume == null) || (vplexVolume.getInactive()) || (vplexVolume.isIngestedVolumeWithoutBackend(_dbClient)) || doNotFullyDeleteVolumeList.contains(vplexVolumeURI)) {
                continue;
            }
            if (null == vplexVolume.getAssociatedVolumes()) {
                _log.warn("VPLEX volume {} has no backend volumes. It was possibly ingested 'Virtual Volume Only'.", vplexVolume.forDisplay());
            } else {
                for (String assocVolumeId : vplexVolume.getAssociatedVolumes()) {
                    URI assocVolumeURI = new URI(assocVolumeId);
                    Volume volume = _dbClient.queryObject(Volume.class, assocVolumeURI);
                    if (volume == null || volume.getInactive() == true) {
                        continue;
                    }
                    StorageSystem array = arrayMap.get(volume.getStorageController());
                    if (array == null) {
                        array = _dbClient.queryObject(StorageSystem.class, volume.getStorageController());
                        arrayMap.put(array.getId(), array);
                    }
                    if (arrayVolumesMap.get(array.getId()) == null) {
                        arrayVolumesMap.put(array.getId(), new ArrayList<URI>());
                    }
                    arrayVolumesMap.get(array.getId()).add(volume.getId());
                    backendVolURIs.add(volume.getId());
                }
            }
            // A VPLEX mirror is backed by its own backend volume on the array;
            // hence the backend volume for that mirror needs to be deleted as well.
            if (vplexVolume.getMirrors() != null && !(vplexVolume.getMirrors().isEmpty())) {
                for (String mirrorId : vplexVolume.getMirrors()) {
                    VplexMirror vplexMirror = _dbClient.queryObject(VplexMirror.class, URI.create(mirrorId));
                    if (vplexMirror == null || vplexMirror.getInactive() == true || vplexMirror.getAssociatedVolumes() == null) {
                        continue;
                    }
                    for (String assocVolumeId : vplexMirror.getAssociatedVolumes()) {
                        URI assocVolumeURI = new URI(assocVolumeId);
                        Volume volume = _dbClient.queryObject(Volume.class, assocVolumeURI);
                        if (volume == null || volume.getInactive() == true) {
                            continue;
                        }
                        StorageSystem array = arrayMap.get(volume.getStorageController());
                        if (array == null) {
                            array = _dbClient.queryObject(StorageSystem.class, volume.getStorageController());
                            arrayMap.put(array.getId(), array);
                        }
                        if (arrayVolumesMap.get(array.getId()) == null) {
                            arrayVolumesMap.put(array.getId(), new ArrayList<URI>());
                        }
                        arrayVolumesMap.get(array.getId()).add(volume.getId());
                        backendVolURIs.add(volume.getId());
                    }
                }
            }
        }
        waitFor = VPLEX_STEP;
        if (vplexAddUnexportVolumeWfSteps(workflow, VPLEX_STEP, backendVolURIs, exportGroupList)) {
            waitFor = UNEXPORT_STEP;
        }
        return waitFor;
    } catch (Exception ex) {
        throw VPlexApiException.exceptions.addStepsForDeleteVolumesFailed(ex);
    }
}
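The arrayMap / arrayVolumesMap bookkeeping above is a group-by over the backend volumes' storage controllers. A compact restatement of that pattern for reference (sketch only, requires Java 8; backendVolumes is an assumed list of already-loaded backend Volume objects):

    // Group backend volume URIs by their owning storage system, caching each
    // StorageSystem the first time it is seen.
    Map<URI, StorageSystem> arraysById = new HashMap<>();
    Map<URI, List<URI>> volumeUrisByArray = new HashMap<>();
    for (Volume backendVolume : backendVolumes) {
        URI arrayId = backendVolume.getStorageController();
        arraysById.computeIfAbsent(arrayId, id -> _dbClient.queryObject(StorageSystem.class, id));
        volumeUrisByArray.computeIfAbsent(arrayId, id -> new ArrayList<>()).add(backendVolume.getId());
    }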
Use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.
From class ReplicaDeviceController, method createReplicaIfCGHasReplica.
/**
* Creates a replica snap/clone/mirror for each newly created volume, if the existing CG volumes have any replica.
*
* @param workflow the workflow to which the replica creation steps are added
* @param waitFor the step or step group the new steps should wait on
* @param volumeDescriptors descriptors for the newly created volumes
* @param cgURI the URI of the consistency group the volumes belong to
* @return the last step added, or the original waitFor if no steps were added
*/
private String createReplicaIfCGHasReplica(Workflow workflow, String waitFor, List<VolumeDescriptor> volumeDescriptors, URI cgURI) {
    log.info("CG URI {}", cgURI);
    if (volumeDescriptors != null && !volumeDescriptors.isEmpty()) {
        VolumeDescriptor firstVolumeDescriptor = volumeDescriptors.get(0);
        if (firstVolumeDescriptor != null && cgURI != null) {
            // find member volumes in the group
            BlockConsistencyGroup cg = _dbClient.queryObject(BlockConsistencyGroup.class, cgURI);
            List<Volume> existingVolumesInCG = BlockConsistencyGroupUtils.getActiveNativeVolumesInCG(cg, _dbClient);
            URI storage = existingVolumesInCG.get(0).getStorageController();
            // We will not end up in more than 1 RG within a CG, hence taking System from CG is fine.
            StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, storage);
            if (checkIfCGHasCloneReplica(existingVolumesInCG)) {
                log.info("Adding clone steps for create {} volumes", firstVolumeDescriptor.getType());
                // create new clones for the newly created volumes
                // add the created clones to clone groups
                waitFor = createCloneSteps(workflow, waitFor, volumeDescriptors, existingVolumesInCG, cgURI);
            }
            if (checkIfCGHasMirrorReplica(existingVolumesInCG)) {
                log.info("Adding mirror steps for create {} volumes", firstVolumeDescriptor.getType());
                // create new mirrors for the newly created volumes
                // add the created mirrors to mirror groups
                waitFor = createMirrorSteps(workflow, waitFor, volumeDescriptors, existingVolumesInCG, cgURI);
            }
            List<BlockSnapshotSession> sessions = getSnapSessionsForCGVolume(existingVolumesInCG.get(0));
            boolean isExistingCGSnapShotAvailable = checkIfCGHasSnapshotReplica(existingVolumesInCG);
            boolean isExistingCGSnapSessionAvailable = sessions != null && !sessions.isEmpty();
            boolean isVMAX3ExistingVolume = ControllerUtils.isVmaxVolumeUsing803SMIS(existingVolumesInCG.get(0), _dbClient);
            List<URI> volumeListtoAddURIs = VolumeDescriptor.getVolumeURIs(volumeDescriptors);
            List<Volume> volumeListToAdd = ControllerUtils.queryVolumesByIterativeQuery(_dbClient, volumeListtoAddURIs);
            if (isVMAX3ExistingVolume) {
                if (isVMAX3VolumeHasSessionOnly(isExistingCGSnapSessionAvailable, isExistingCGSnapShotAvailable)) {
                    log.info("Existing CG only has Snap Session, adding snap session steps for adding volumes");
                    processSnapSessions(existingVolumesInCG, workflow, waitFor, volumeListToAdd);
                } else if (isVMAX3VolumeHasSnapshotOnly(isExistingCGSnapSessionAvailable, isExistingCGSnapShotAvailable)) {
                    // create new snapshots for the newly added volumes
                    // add the created snapshots to snapshot groups
                    Set<String> snapGroupNames = ControllerUtils.getSnapshotReplicationGroupNames(existingVolumesInCG, _dbClient);
                    for (String snapGroupName : snapGroupNames) {
                        // we can use the same storage system as RG--> CG is 1:1 mapping
                        log.info("Existing CG only has Snapshots, adding snapshot steps for existing snap group {} adding volumes", snapGroupName);
                        waitFor = addSnapshotsToReplicationGroupStep(workflow, waitFor, storageSystem, volumeListToAdd, snapGroupName, cgURI);
                    }
                } else if (isVMAX3VolumeHasSessionAndSnapshot(isExistingCGSnapSessionAvailable, isExistingCGSnapShotAvailable)) {
                    log.info("Existing CG has both Sessions and linked targets, adding snapshot and session steps");
                    processSnapSessionsAndLinkedTargets(existingVolumesInCG, workflow, waitFor, volumeListToAdd, cgURI);
                }
            } else if (isExistingCGSnapShotAvailable) {
                // non VMAX3 volume
                log.info("Adding snapshot steps for adding volumes");
                // create new snapshots for the newly added volumes
                // add the created snapshots to snapshot groups
                Set<String> snapGroupNames = ControllerUtils.getSnapshotReplicationGroupNames(existingVolumesInCG, _dbClient);
                for (String snapGroupName : snapGroupNames) {
                    waitFor = addSnapshotsToReplicationGroupStep(workflow, waitFor, storageSystem, volumeListToAdd, snapGroupName, cgURI);
                }
            }
        }
    }
    return waitFor;
}
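A minimal call-site sketch (hypothetical: createReplicaIfCGHasReplica is private to ReplicaDeviceController, and descriptorsByCg, a map from consistency group URI to its new volume descriptors, is an assumed local variable):

    // For each consistency group touched by the request, add replica (clone/mirror/
    // snapshot/snap session) steps for the newly created volumes when the CG already
    // carries replicas; chain the returned waitFor token between groups.
    for (Map.Entry<URI, List<VolumeDescriptor>> entry : descriptorsByCg.entrySet()) {
        waitFor = createReplicaIfCGHasReplica(workflow, waitFor, entry.getValue(), entry.getKey());
    }
    return waitFor;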