Use of com.emc.storageos.recoverpoint.requests.CreateCopyParams in project coprhd-controller by CoprHD.
From the class RPDeviceController, method getCGRequestParams:
/**
 * Create the RP Client consistency group request object based on the incoming prepared volumes.
 *
 * @param volumeDescriptors
 *            volume descriptor objects
 * @param rpSystem
 *            the RecoverPoint protection system the CG will be created on
 * @return RP request to create CG
 * @throws DatabaseException
 */
private CGRequestParams getCGRequestParams(List<VolumeDescriptor> volumeDescriptors, ProtectionSystem rpSystem)
        throws DatabaseException {
    _log.info("Creating CG Request param...");

    // Map of replication set request objects, keyed by the rset name itself
    Map<String, CreateRSetParams> rsetParamsMap = new HashMap<String, CreateRSetParams>();
    // Map of copy request objects, keyed by the copy name itself
    Map<String, CreateCopyParams> copyParamsMap = new HashMap<String, CreateCopyParams>();

    // The parameters we need at the CG level that we can only get from looking at the volumes
    Project project = null;
    String cgName = null;
    Set<String> productionCopies = new HashSet<String>();
    BlockConsistencyGroup cg = null;
    String copyMode = null;
    String rpoType = null;
    Long rpoValue = null;
    int maxNumberOfSnapShots = 0;

    Map<URI, Volume> volumeMap = new HashMap<URI, Volume>();

    // Sort the volume descriptors using the natural order of the enum.
    // In this case sort as:
    // SOURCE, TARGET, JOURNAL
    // We want SOURCE volumes to be processed first below to populate the
    // productionCopies in order.
    VolumeDescriptor.sortByType(volumeDescriptors);

    // Next create all of the request objects we need
    for (VolumeDescriptor volumeDescriptor : volumeDescriptors) {
        Volume volume = null;
        if (volumeMap.containsKey(volumeDescriptor.getVolumeURI())) {
            volume = volumeMap.get(volumeDescriptor.getVolumeURI());
        } else {
            volume = _dbClient.queryObject(Volume.class, volumeDescriptor.getVolumeURI());
            volumeMap.put(volume.getId(), volume);
        }

        boolean isMetroPoint = RPHelper.isMetroPointVolume(_dbClient, volume);
        boolean isRPSource = RPHelper.isRPSource(volumeDescriptor);
        boolean isRPTarget = RPHelper.isRPTarget(volumeDescriptor);
        boolean extraParamsGathered = false;

        if (volumeDescriptor.getCapabilitiesValues() != null) {
            maxNumberOfSnapShots = volumeDescriptor.getCapabilitiesValues().getRPMaxSnaps();
        }

        // Set up the source and target volumes in their respective replication sets
        if (isRPSource || isRPTarget) {
            // Gather the extra params we need (once is sufficient)
            if (isRPSource && !extraParamsGathered) {
                project = _dbClient.queryObject(Project.class, volume.getProject());
                cg = _dbClient.queryObject(BlockConsistencyGroup.class,
                        volumeDescriptor.getCapabilitiesValues().getBlockConsistencyGroup());
                cgName = cg.getCgNameOnStorageSystem(rpSystem.getId());
                if (cgName == null) {
                    cgName = CG_NAME_PREFIX + cg.getLabel();
                }
                copyMode = volumeDescriptor.getCapabilitiesValues().getRpCopyMode();
                rpoType = volumeDescriptor.getCapabilitiesValues().getRpRpoType();
                rpoValue = volumeDescriptor.getCapabilitiesValues().getRpRpoValue();
                // Flag so we only grab this information once
                extraParamsGathered = true;
            }

            if (isMetroPoint && isRPSource) {
                // We need to handle the MetroPoint request a bit differently.
                // Since the same metro volume will be part of 2 (production) copies in the replication set,
                // we need to fetch the correct internal site names and other site-related parameters from the
                // backing volumes.
                StringSet backingVolumes = volume.getAssociatedVolumes();
                if (null == backingVolumes || backingVolumes.isEmpty()) {
                    _log.error("VPLEX volume {} has no backend volumes.", volume.forDisplay());
                    throw InternalServerErrorException.internalServerErrors
                            .noAssociatedVolumesForVPLEXVolume(volume.forDisplay());
                }
                for (String backingVolumeStr : backingVolumes) {
                    Volume backingVolume = _dbClient.queryObject(Volume.class, URI.create(backingVolumeStr));
                    CreateVolumeParams volumeParams = populateVolumeParams(volume.getId(),
                            volume.getStorageController(), backingVolume.getVirtualArray(),
                            backingVolume.getInternalSiteName(), true, backingVolume.getRpCopyName(),
                            RPHelper.getRPWWn(volume.getId(), _dbClient), maxNumberOfSnapShots);
                    _log.info(String.format("Creating RSet Param for MetroPoint RP PROD - VOLUME: [%s] Name: [%s]",
                            backingVolume.getLabel(), volume.getRSetName()));
                    populateRsetsMap(rsetParamsMap, volumeParams, volume);
                    productionCopies.add(backingVolume.getRpCopyName());
                }
            } else {
                CreateVolumeParams volumeParams = populateVolumeParams(volume.getId(), volume.getStorageController(),
                        volume.getVirtualArray(), volume.getInternalSiteName(), isRPSource, volume.getRpCopyName(),
                        RPHelper.getRPWWn(volume.getId(), _dbClient), maxNumberOfSnapShots);
                String type = isRPSource ? "PROD" : "TARGET";
                _log.info(String.format("Creating RSet Param for RP %s - VOLUME: [%s] Name: [%s]", type,
                        volume.getLabel(), volume.getRSetName()));
                populateRsetsMap(rsetParamsMap, volumeParams, volume);
                if (isRPSource) {
                    productionCopies.add(volume.getRpCopyName());
                }
            }
        }

        // Set up the journal volumes in the copy objects
        if (volumeDescriptor.getType().equals(VolumeDescriptor.Type.RP_JOURNAL)
                || volumeDescriptor.getType().equals(VolumeDescriptor.Type.RP_VPLEX_VIRT_JOURNAL)) {
            if (cgName == null) {
                project = _dbClient.queryObject(Project.class, volume.getProject());
                cg = _dbClient.queryObject(BlockConsistencyGroup.class,
                        volumeDescriptor.getCapabilitiesValues().getBlockConsistencyGroup());
                cgName = cg.getCgNameOnStorageSystem(rpSystem.getId());
                if (cgName == null) {
                    cgName = CG_NAME_PREFIX + cg.getLabel();
                }
            }
            CreateVolumeParams volumeParams = populateVolumeParams(volume.getId(), volume.getStorageController(),
                    volume.getVirtualArray(), volume.getInternalSiteName(),
                    RPHelper.isProductionJournal(productionCopies, volume), volume.getRpCopyName(),
                    RPHelper.getRPWWn(volume.getId(), _dbClient), maxNumberOfSnapShots);
            String key = volume.getRpCopyName();
            _log.info(String.format("Creating Copy Param for RP JOURNAL: VOLUME - [%s] Name: [%s]",
                    volume.getLabel(), key));
            if (copyParamsMap.containsKey(key)) {
                copyParamsMap.get(key).getJournals().add(volumeParams);
            } else {
                CreateCopyParams copyParams = new CreateCopyParams();
                copyParams.setName(key);
                copyParams.setJournals(new ArrayList<CreateVolumeParams>());
                copyParams.getJournals().add(volumeParams);
                copyParamsMap.put(key, copyParams);
            }
        }
    }

    // Set up the CG request
    CGRequestParams cgParams = new CGRequestParams();
    cgParams.setCopies(new ArrayList<CreateCopyParams>());
    cgParams.getCopies().addAll(copyParamsMap.values());
    cgParams.setRsets(new ArrayList<CreateRSetParams>());
    cgParams.getRsets().addAll(rsetParamsMap.values());
    cgParams.setCgName(cgName);
    cgParams.setCgUri(cg.getId());
    cgParams.setProject(project.getId());
    cgParams.setTenant(project.getTenantOrg().getURI());

    CGPolicyParams policyParams = new CGPolicyParams();
    policyParams.setCopyMode(copyMode);
    policyParams.setRpoType(rpoType);
    policyParams.setRpoValue(rpoValue);
    cgParams.setCgPolicy(policyParams);

    _log.info(String.format("CG Request param complete:%n %s", cgParams));
    return cgParams;
}
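
The journal handling above follows a simple get-or-create pattern on copyParamsMap: every journal that shares an RP copy name is appended to the same CreateCopyParams. Below is a minimal sketch of that pattern pulled out as a standalone helper. The class and method names (CopyParamsAccumulator, addJournal) are illustrative only, not part of RPDeviceController, and the sketch assumes the coprhd-controller request classes (CreateCopyParams, CreateVolumeParams) are available on the classpath; only the setters and getters visible in the method above are used.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;

import com.emc.storageos.recoverpoint.requests.CreateCopyParams;
import com.emc.storageos.recoverpoint.requests.CreateVolumeParams;

// Illustrative helper (not a coprhd-controller class): accumulates journal volume params
// into one CreateCopyParams per RP copy name, mirroring the get-or-create logic above.
public class CopyParamsAccumulator {

    private final Map<String, CreateCopyParams> copyParamsMap = new HashMap<String, CreateCopyParams>();

    public void addJournal(String rpCopyName, CreateVolumeParams journalVolumeParams) {
        CreateCopyParams copyParams = copyParamsMap.get(rpCopyName);
        if (copyParams == null) {
            // First journal seen for this copy: create the copy request with an empty journal list.
            copyParams = new CreateCopyParams();
            copyParams.setName(rpCopyName);
            copyParams.setJournals(new ArrayList<CreateVolumeParams>());
            copyParamsMap.put(rpCopyName, copyParams);
        }
        // Every journal carrying the same copy name lands in the same CreateCopyParams.
        copyParams.getJournals().add(journalVolumeParams);
    }

    public Map<String, CreateCopyParams> getCopyParamsMap() {
        return copyParamsMap;
    }
}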
Use of com.emc.storageos.recoverpoint.requests.CreateCopyParams in project coprhd-controller by CoprHD.
From the class RPDeviceController, method generateStorageSystemExportMaps:
/**
 * Helper method that consolidates all of the volumes into storage systems to make the minimum amount of export
 * calls.
 *
 * @param cgParams
 *            the consistency group request parameters built from the volume descriptors
 * @param volumeDescriptors
 *            volume descriptor objects
 * @return the consolidated RPExport objects, one per storage system / RP internal site / virtual array combination
 */
private Collection<RPExport> generateStorageSystemExportMaps(CGRequestParams cgParams,
        List<VolumeDescriptor> volumeDescriptors) {
    _log.info("Generate the storage system exports...START");
    Map<String, RPExport> rpExportMap = new HashMap<String, RPExport>();

    // First, walk the replication sets and place each source/target volume into an export
    // bucket keyed by storage system, RP internal site, and virtual array.
    for (CreateRSetParams rset : cgParams.getRsets()) {
        _log.info("Replication Set: " + rset.getName());
        Set<CreateVolumeParams> createVolumeParams = new HashSet<CreateVolumeParams>();
        createVolumeParams.addAll(rset.getVolumes());
        List<URI> processedRsetVolumes = new ArrayList<URI>();
        for (CreateVolumeParams rsetVolume : createVolumeParams) {
            // The same volume can appear more than once in a replication set, so skip
            // the second reference and continue processing.
            if (processedRsetVolumes.contains(rsetVolume.getVolumeURI())) {
                continue;
            }
            processedRsetVolumes.add(rsetVolume.getVolumeURI());

            // Retrieve the volume
            Volume volume = _dbClient.queryObject(Volume.class, rsetVolume.getVolumeURI());
            _log.info(String.format("Generating Exports for %s volume [%s](%s)...",
                    volume.getPersonality().toString(), volume.getLabel(), volume.getId()));

            // List of volumes to export; normally just one volume will be added to this list unless
            // we have a MetroPoint config, in which case we would have two (one for each leg of the VPLEX).
            Set<Volume> volumes = new HashSet<Volume>();

            // Check to see if this is a SOURCE volume
            if (volume.checkPersonality(PersonalityTypes.SOURCE.toString())) {
                // Check the vpool to ensure we're exporting the source volume to the correct storage system.
                // In the case of MetroPoint, however, it could be a vpool change; in that case use the
                // new vpool from the vpool change.
                URI vpoolURI = null;
                if (VolumeDescriptor.getVirtualPoolChangeVolume(volumeDescriptors) != null) {
                    vpoolURI = getVirtualPoolChangeNewVirtualPool(volumeDescriptors);
                } else {
                    vpoolURI = volume.getVirtualPool();
                }
                VirtualPool vpool = _dbClient.queryObject(VirtualPool.class, vpoolURI);

                // In an RP+VPLEX distributed setup, the user can choose to protect only the HA side,
                // so we would export only to the HA StorageView on the VPLEX.
                boolean exportToHASideOnly = VirtualPool.isRPVPlexProtectHASide(vpool);

                if (exportToHASideOnly || VirtualPool.vPoolSpecifiesMetroPoint(vpool)) {
                    _log.info("Export is for {}. Basing export(s) off backing VPLEX volumes for RP Source volume [{}].",
                            (exportToHASideOnly ? "RP+VPLEX distributed HA side only" : "MetroPoint"),
                            volume.getLabel());
                    // If MetroPoint is enabled we need to create exports for each leg of the VPLEX.
                    // Get the associated volumes and add them to the list so we can create RPExports
                    // for each one.
                    StringSet backingVolumes = volume.getAssociatedVolumes();
                    if (null == backingVolumes || backingVolumes.isEmpty()) {
                        _log.error("VPLEX volume {} has no backend volumes.", volume.forDisplay());
                        throw InternalServerErrorException.internalServerErrors
                                .noAssociatedVolumesForVPLEXVolume(volume.forDisplay());
                    }
                    for (String volumeId : backingVolumes) {
                        Volume vol = _dbClient.queryObject(Volume.class, URI.create(volumeId));
                        // Check to see if we only want to export to the HA side of the RP+VPLEX setup
                        if (exportToHASideOnly) {
                            if (!vol.getVirtualArray().toString().equals(vpool.getHaVarrayConnectedToRp())) {
                                continue;
                            }
                        }
                        volumes.add(vol);
                    }
                } else {
                    // Not RP+VPLEX distributed or MetroPoint, add the volume and continue on.
                    volumes.add(volume);
                }
            } else {
                // Not a SOURCE volume, add the volume and continue on.
                volumes.add(volume);
            }

            for (Volume vol : volumes) {
                URI storageSystem = rsetVolume.getStorageSystem();
                String rpSiteName = vol.getInternalSiteName();
                URI varray = vol.getVirtualArray();
                // Intentionally want the ID of the parent volume, not the inner looping vol.
                // This is because we could be trying to create exports for MetroPoint.
                URI volumeId = volume.getId();

                // Generate a unique key based on Storage System + Internal Site + Virtual Array
                String key = storageSystem.toString() + rpSiteName + varray.toString();

                // Try and get an existing RPExport object from the map using the key
                RPExport rpExport = rpExportMap.get(key);

                // If it doesn't exist, create the entry and add it to the map with the key
                if (rpExport == null) {
                    rpExport = new RPExport(storageSystem, rpSiteName, varray);
                    rpExportMap.put(key, rpExport);
                }

                // Add host information to the export if specified
                if (vol.checkPersonality(Volume.PersonalityTypes.SOURCE.name())) {
                    for (VolumeDescriptor desc : volumeDescriptors) {
                        if (desc.getVolumeURI().equals(vol.getId())) {
                            if (!NullColumnValueGetter.isNullURI(desc.getComputeResource())) {
                                _log.info("Add Host/Cluster information for source volume exports");
                                rpExport.setComputeResource(desc.getComputeResource());
                                break;
                            }
                        }
                    }
                }

                _log.info(String.format("Adding %s volume [%s](%s) to export: %s",
                        volume.getPersonality().toString(), volume.getLabel(), volume.getId(), rpExport.toString()));
                rpExport.getVolumes().add(volumeId);
            }
        }
    }

    // Next, walk the copies and add each journal volume to an export. If the journals use a separate
    // varray or RP site, a new export entry is created for the journal.
    for (CreateCopyParams copy : cgParams.getCopies()) {
        _log.info("Copy: " + copy.getName());
        for (CreateVolumeParams journalVolume : copy.getJournals()) {
            // Retrieve the volume
            Volume volume = _dbClient.queryObject(Volume.class, journalVolume.getVolumeURI());
            _log.info(String.format("Generating export for %s volume [%s](%s)...",
                    volume.getPersonality().toString(), volume.getLabel(), volume.getId()));

            URI storageSystem = journalVolume.getStorageSystem();
            String rpSiteName = volume.getInternalSiteName();
            URI varray = volume.getVirtualArray();
            URI volumeId = volume.getId();

            // Generate a unique key based on Storage System + Internal Site + Virtual Array
            String key = storageSystem.toString() + rpSiteName + varray.toString();

            // Try and get an existing RPExport object from the map using the key.
            // If a separate varray is specified for journals, a new entry will be created.
            RPExport rpExport = rpExportMap.get(key);

            // If it doesn't exist, create the entry and add it to the map with the key
            if (rpExport == null) {
                _log.info("RPExport is for journals only");
                rpExport = new RPExport(storageSystem, rpSiteName, varray);
                rpExport.setIsJournalExport(true);
                rpExportMap.put(key, rpExport);
            }

            _log.info(String.format("Adding %s volume [%s](%s) to export: %s",
                    volume.getPersonality().toString(), volume.getLabel(), volume.getId(), rpExport.toString()));
            rpExport.getVolumes().add(volumeId);
        }
    }

    _log.info("Generate the storage system exports...END");
    return rpExportMap.values();
}
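
The consolidation above boils down to grouping volumes under a composite key of storage system, RP internal site, and virtual array, so that each group can later be exported with a single call. The sketch below shows that grouping in isolation; ExportBucket and ExportGrouping are illustrative stand-ins of my own, not the project's RPExport class, so the example is self-contained and runnable with only the JDK.

import java.net.URI;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Illustrative stand-in for RPExport: one export target per storage system / RP site / varray.
class ExportBucket {
    final URI storageSystem;
    final String rpSiteName;
    final URI varray;
    final Set<URI> volumes = new HashSet<URI>();

    ExportBucket(URI storageSystem, String rpSiteName, URI varray) {
        this.storageSystem = storageSystem;
        this.rpSiteName = rpSiteName;
        this.varray = varray;
    }
}

public class ExportGrouping {

    private final Map<String, ExportBucket> exportMap = new HashMap<String, ExportBucket>();

    // Mirrors the keying above: concatenating the three identifiers yields one bucket
    // (and therefore one export call) per storage system + internal site + virtual array.
    public void addVolume(URI storageSystem, String rpSiteName, URI varray, URI volumeId) {
        String key = storageSystem.toString() + rpSiteName + varray.toString();
        ExportBucket bucket = exportMap.get(key);
        if (bucket == null) {
            bucket = new ExportBucket(storageSystem, rpSiteName, varray);
            exportMap.put(key, bucket);
        }
        bucket.volumes.add(volumeId);
    }

    public Collection<ExportBucket> getExports() {
        return exportMap.values();
    }
}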