Use of com.emc.storageos.db.client.model.ProtectionSystem in project coprhd-controller by CoprHD.
In class RecoverPointSchedulerTest, the method fireProtectionPlacementRulesCLRSite1NoCapacityTest:
@Test
public void fireProtectionPlacementRulesCLRSite1NoCapacityTest() throws IllegalArgumentException, IllegalAccessException, InvocationTargetException, ClassNotFoundException, SecurityException, NoSuchMethodException {
ProtectionSystem ps = buildProtectionSystemWithCapacity();
// Set the remote site volume count to one less than full.
ps.getSiteVolumeCount().remove("1");
ps.getSiteVolumeCount().put("1", "510");
RPProtectionRecommendation ppm1 = new RPProtectionRecommendation();
fillRecommendationObject(ppm1, ps, "1", "2", nh1, nh2, pool1, poolA, 1);
RPProtectionRecommendation ppm2 = new RPProtectionRecommendation();
fillRecommendationObject(ppm2, ps, "1", "2", nh1, nh2, pool1, poolB, 1);
RPProtectionRecommendation ppm3 = new RPProtectionRecommendation();
fillRecommendationObject(ppm3, ps, "1", "2", nh1, nh2, pool2, poolA, 2);
RPProtectionRecommendation ppm4 = new RPProtectionRecommendation();
fillRecommendationObject(ppm4, ps, "1", "2", nh1, nh2, pool3, poolB, 4);
assertTrue(invokeFireProtectionPlacementRules(ps, ppm1, 1));
assertTrue(invokeFireProtectionPlacementRules(ps, ppm2, 1));
assertTrue(!invokeFireProtectionPlacementRules(ps, ppm3, 2));
assertTrue(!invokeFireProtectionPlacementRules(ps, ppm4, 4));
}
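The helper invokeFireProtectionPlacementRules is not shown on this page. A minimal sketch of how such a helper could call the scheduler's private placement-rule method through reflection is given below; the target method name, its parameter types, the scheduler field, and the java.lang.reflect.Method import are all assumptions inferred from the test above, not confirmed against the RecoverPointScheduler source.
// Hypothetical sketch only: the private method name and parameter types are assumed
// from the calls in the test above.
private boolean invokeFireProtectionPlacementRules(ProtectionSystem ps, RPProtectionRecommendation rec, int resourceCount)
        throws NoSuchMethodException, IllegalAccessException, InvocationTargetException {
    Method method = RecoverPointScheduler.class.getDeclaredMethod(
            "fireProtectionPlacementRules", ProtectionSystem.class, RPProtectionRecommendation.class, Integer.class);
    // Allow the test to invoke the private scheduler method directly
    method.setAccessible(true);
    return (Boolean) method.invoke(scheduler, ps, rec, resourceCount);
}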
Use of com.emc.storageos.db.client.model.ProtectionSystem in project coprhd-controller by CoprHD.
In class BlockRecoverPointIngestOrchestrator, the method performRPExportIngestion:
/**
* RecoverPoint volumes are expected to have export masks where the volume is exported to
* a RecoverPoint site. Therefore every RP volume (source, target, or journal) needs to
* go through this code and have its export mask ingested. Even if the mask has already been
* ingested by a previous volume ingestion, this method still needs to update the ExportGroup and
* ExportMask objects to reflect the newly ingested volume as part of its management.
*
* @param parentRequestContext the IngestionRequestContext for the overall ingestion request
* @param volumeContext the RecoverPointVolumeIngestionContext for the volume currently being ingested
* @param unManagedVolume the unmanaged volume being ingested
* @param volume the managed volume
*/
private void performRPExportIngestion(IngestionRequestContext parentRequestContext, RecoverPointVolumeIngestionContext volumeContext, UnManagedVolume unManagedVolume, Volume volume) {
_logger.info("starting RecoverPoint export ingestion for volume {}", volume.forDisplay());
Project project = volumeContext.getProject();
ProtectionSystem protectionSystem = _dbClient.queryObject(ProtectionSystem.class, volume.getProtectionController());
StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, volume.getStorageController());
List<UnManagedExportMask> unManagedRPExportMasks = findUnManagedRPExportMask(protectionSystem, unManagedVolume);
if (unManagedRPExportMasks.isEmpty()) {
_logger.error("Could not find any unmanaged export masks associated with volume: " + unManagedVolume.getLabel());
throw IngestionException.exceptions.noUnManagedExportMaskFound(unManagedVolume.getNativeGuid());
}
// Keep a map of internal site name to varray
Map<String, VirtualArray> internalSiteToVarrayMap = new HashMap<String, VirtualArray>();
internalSiteToVarrayMap.put(volume.getInternalSiteName(), volumeContext.getVarray(unManagedVolume));
// If this is a MetroPoint volume we're going to have multiple ExportMasks/ExportGroups to deal with.
// We'll need to query the backend volumes for extra info to populate internalSiteToVarrayMap so
// we can properly line up the ExportMasks/ExportGroups.
boolean metropoint = RPHelper.isMetroPointVolume(_dbClient, volume);
if (metropoint) {
// We need the VPLEX ingest context to get the backend volume info
VplexVolumeIngestionContext vplexVolumeContext = ((RpVplexVolumeIngestionContext) volumeContext.getVolumeContext()).getVplexVolumeIngestionContext();
for (String associatedVolumeIdStr : vplexVolumeContext.getAssociatedVolumeIds(volume)) {
// Find the associated volumes using the context maps or the db if they are already there
Volume associatedVolume = VolumeIngestionUtil.findVolume(_dbClient, vplexVolumeContext.getBlockObjectsToBeCreatedMap(), vplexVolumeContext.getDataObjectsToBeUpdatedMap(), associatedVolumeIdStr);
String internalSiteName = associatedVolume.getInternalSiteName();
// If we don't already have an entry for this internal site name, let's add it now.
if (!internalSiteToVarrayMap.containsKey(internalSiteName)) {
internalSiteToVarrayMap.put(internalSiteName, _dbClient.queryObject(VirtualArray.class, associatedVolume.getVirtualArray()));
}
}
}
// this will more than likely only loop once.
for (Entry<String, VirtualArray> entry : internalSiteToVarrayMap.entrySet()) {
String internalSiteName = entry.getKey();
VirtualArray virtualArray = entry.getValue();
UnManagedExportMask em = null;
if (metropoint) {
// Since we're flagged for MetroPoint we need to determine which ExportMask to use.
// We need the MetroPoint volume to be added to BOTH ExportGroups that represent the
// two Storage Views on VPLEX for cluster-1 and cluster-2.
// So let's use the varray to find the cluster we're looking for on this pass and match
// it to the maskingViewPath of the UnManagedExportMask.
// This should line things up roughly as:
// VPLEX Storage View 1 -> VPLEX Cluster1 + RPA1
// VPLEX Storage View 2 -> VPLEX Cluster2 + RPA2
String vplexCluster = ConnectivityUtil.getVplexClusterForVarray(virtualArray.getId(), storageSystem.getId(), _dbClient);
// First try and match based on UnManagedExportMask ports
for (UnManagedExportMask exportMask : unManagedRPExportMasks) {
for (String portUri : exportMask.getKnownStoragePortUris()) {
StoragePort port = _dbClient.queryObject(StoragePort.class, URI.create(portUri));
if (port != null && !port.getInactive()) {
String vplexClusterForMask = ConnectivityUtil.getVplexClusterOfPort(port);
if (vplexCluster.equals(vplexClusterForMask)) {
em = exportMask;
break;
}
}
}
if (em != null) {
break;
}
}
if (em == null) {
// It really shouldn't come to this, but leaving this code just in case.
for (UnManagedExportMask exportMask : unManagedRPExportMasks) {
if (exportMask.getMaskingViewPath().contains("cluster-" + vplexCluster)) {
em = exportMask;
break;
}
}
}
} else {
em = unManagedRPExportMasks.get(0);
}
// If the export mask for the ingested volume has a name containing the JOURNAL keyword, make sure the ExportGroup created carries
// the corresponding internal flag.
boolean isJournalExport = false;
if (em.getMaskName().toLowerCase().contains(VolumeIngestionUtil.RP_JOURNAL)) {
isJournalExport = true;
}
String exportGroupGeneratedName = RPHelper.generateExportGroupName(protectionSystem, storageSystem, internalSiteName, virtualArray, isJournalExport);
ExportGroup exportGroup = VolumeIngestionUtil.verifyExportGroupExists(parentRequestContext, exportGroupGeneratedName, project.getId(), em.getKnownInitiatorUris(), virtualArray.getId(), _dbClient);
boolean exportGroupCreated = false;
if (null == exportGroup) {
exportGroupCreated = true;
Integer numPaths = em.getZoningMap().size();
_logger.info("Creating Export Group with label {}", em.getMaskName());
exportGroup = RPHelper.createRPExportGroup(exportGroupGeneratedName, virtualArray, project, numPaths, isJournalExport);
}
if (null != exportGroup) {
// check if the ExportGroup has already been fetched
ExportGroup loadedExportGroup = parentRequestContext.findExportGroup(exportGroup.getLabel(), project.getId(), virtualArray.getId(), null, null);
if (null != loadedExportGroup) {
exportGroup = loadedExportGroup;
}
}
volumeContext.setExportGroup(exportGroup);
volumeContext.setExportGroupCreated(exportGroupCreated);
volumeContext.getRpExportGroupMap().put(exportGroup, exportGroupCreated);
// set RP device initiators to be used as the "host" for export mask ingestion
List<Initiator> initiators = new ArrayList<Initiator>();
Iterator<Initiator> initiatorItr = _dbClient.queryIterativeObjects(Initiator.class, URIUtil.toURIList(em.getKnownInitiatorUris()));
while (initiatorItr.hasNext()) {
initiators.add(initiatorItr.next());
}
volumeContext.setDeviceInitiators(initiators);
// Find the ingest export strategy and call into it for this unmanaged export mask
IngestExportStrategy ingestStrategy = ingestStrategyFactory.buildIngestExportStrategy(unManagedVolume);
volume = ingestStrategy.ingestExportMasks(unManagedVolume, volume, volumeContext);
if (null == volume) {
// Ingestion did not succeed; an exception should already have been thrown, but throw one here just in case
throw IngestionException.exceptions.generalVolumeException(unManagedVolume.getLabel(), "check the logs for more details");
}
}
}
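The helper findUnManagedRPExportMask called near the top of this method is not shown on this page. A rough sketch of the idea, matching the unmanaged volume's export masks against the protection system's site initiators, follows; the getUnmanagedExportMasks accessor and the overall structure are assumptions based on patterns visible elsewhere on this page, and the real implementation in BlockRecoverPointIngestOrchestrator may differ.
// Hypothetical sketch of findUnManagedRPExportMask(...): an unmanaged export mask is treated
// as RP-related when any of its known initiators is registered as a site initiator on the
// protection system. Not the actual CoprHD implementation.
private List<UnManagedExportMask> findUnManagedRPExportMask(ProtectionSystem protectionSystem, UnManagedVolume unManagedVolume) {
    List<UnManagedExportMask> rpMasks = new ArrayList<UnManagedExportMask>();
    for (String maskUriStr : unManagedVolume.getUnmanagedExportMasks()) {
        UnManagedExportMask mask = _dbClient.queryObject(UnManagedExportMask.class, URI.create(maskUriStr));
        if (mask == null || mask.getInactive()) {
            continue;
        }
        boolean isRpMask = false;
        for (String wwn : mask.getKnownInitiatorNetworkIds()) {
            for (AbstractChangeTrackingSet<String> siteInitiators : protectionSystem.getSiteInitiators().values()) {
                if (siteInitiators.contains(wwn)) {
                    isRpMask = true;
                    break;
                }
            }
            if (isRpMask) {
                break;
            }
        }
        if (isRpMask) {
            rpMasks.add(mask);
        }
    }
    return rpMasks;
}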
Use of com.emc.storageos.db.client.model.ProtectionSystem in project coprhd-controller by CoprHD.
In class VolumeIngestionUtil, the method isRpExportMask:
/**
* Returns true if the given UnManagedExportMask is for a RecoverPoint Export.
*
* @param uem the UnManagedExportMask to check
* @param dbClient a reference to the database client
* @return true if the given UnManagedExportMask is for a RecoverPoint Export
*/
public static boolean isRpExportMask(UnManagedExportMask uem, DbClient dbClient) {
for (String wwn : uem.getKnownInitiatorNetworkIds()) {
List<URI> protectionSystemUris = dbClient.queryByType(ProtectionSystem.class, true);
List<ProtectionSystem> protectionSystems = dbClient.queryObject(ProtectionSystem.class, protectionSystemUris);
for (ProtectionSystem protectionSystem : protectionSystems) {
for (Entry<String, AbstractChangeTrackingSet<String>> siteInitEntry : protectionSystem.getSiteInitiators().entrySet()) {
if (siteInitEntry.getValue().contains(wwn)) {
_logger.info("this is a RecoverPoint related UnManagedExportMask: " + uem.getMaskName());
return true;
}
}
}
}
return false;
}
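As a usage illustration only (candidateMasks and dbClient are assumed to be in scope), a caller that needs just the RecoverPoint-related masks out of a candidate list could filter with this method:
// Illustrative only: keep just the RecoverPoint-related masks from a candidate list.
List<UnManagedExportMask> rpMasks = new ArrayList<UnManagedExportMask>();
for (UnManagedExportMask candidate : candidateMasks) {
    if (VolumeIngestionUtil.isRpExportMask(candidate, dbClient)) {
        rpMasks.add(candidate);
    }
}
Note that isRpExportMask queries every ProtectionSystem for each initiator WWN, so filtering a long list this way issues repeated database queries; callers processing many masks may want to load the site initiators once and reuse them.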
Use of com.emc.storageos.db.client.model.ProtectionSystem in project coprhd-controller by CoprHD.
In class TaskLockingCompleter, the method lockCG:
/**
* Lock the entire CG based on this volume.
*
* @param dbClient db client
* @param locker locker service
* @return true if lock was acquired
*/
public boolean lockCG(DbClient dbClient, ControllerLockingService locker) {
// Determine the volume to base the lock on
URI volumeId = getId();
// If this is a snapshot object completer, get the volume id from the snapshot.
if (URIUtil.isType(getId(), BlockSnapshot.class)) {
BlockSnapshot snapshot = dbClient.queryObject(BlockSnapshot.class, getId());
volumeId = snapshot.getParent().getURI();
} else if (URIUtil.isType(getId(), BlockConsistencyGroup.class)) {
List<Volume> cgVolumes = CustomQueryUtility.queryActiveResourcesByConstraint(dbClient, Volume.class, getVolumesByConsistencyGroup(getId()));
if (cgVolumes != null && !cgVolumes.isEmpty()) {
// Get the first volume in the CG
volumeId = cgVolumes.get(0).getId();
}
}
// Figure out the lock ID (rpSystemInstallationID:CGName)
Volume volume = dbClient.queryObject(Volume.class, volumeId);
if (volume != null && locker != null) {
if (volume.getProtectionController() != null && volume.getProtectionSet() != null) {
ProtectionSystem rpSystem = dbClient.queryObject(ProtectionSystem.class, volume.getProtectionController());
ProtectionSet protectionSet = dbClient.queryObject(ProtectionSet.class, volume.getProtectionSet());
if (rpSystem != null && protectionSet != null && rpSystem.getInstallationId() != null && protectionSet.getLabel() != null) {
// Lock the CG based on this volume
String lockName = rpSystem.getInstallationId() + LOCK_SEPARATOR + protectionSet.getLabel();
if (locker.acquirePersistentLock(lockName, _opId, 5)) {
_logger.info("Acquired lock: " + lockName);
lockedName = lockName;
return true;
} else {
_logger.info("Failed to acquire lock: " + lockName);
}
}
} else if (volume.getProtectionSet() == null) {
_logger.info("Lock not required, no CG in use");
lockedName = null;
return true;
}
}
return false;
}
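For illustration only (completer, dbClient, locker, and _logger are assumed to be in scope), a caller would typically take the CG lock before starting a RecoverPoint operation and stop if it cannot be acquired; releasing the lock is assumed to be handled when the completer finishes the task.
// Illustrative only: acquire the CG lock before performing a RecoverPoint operation.
if (!completer.lockCG(dbClient, locker)) {
    // Another workflow currently holds the CG lock; do not start the operation.
    _logger.warn("Unable to acquire the consistency group lock; aborting the RP operation.");
    return;
}
// ... proceed with the RecoverPoint CG operation ...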
Use of com.emc.storageos.db.client.model.ProtectionSystem in project coprhd-controller by CoprHD.
In class RPUnManagedObjectDiscoverer, the method discoverUnManagedObjects:
/**
* Discovers the RP CGs and all the volumes therein. It creates or updates the UnManagedProtectionSet
* objects and updates (if they exist) the UnManagedVolume objects with the RP information needed for
* ingestion.
*
* @param accessProfile access profile
* @param dbClient db client
* @param partitionManager partition manager
* @throws Exception
*/
public void discoverUnManagedObjects(AccessProfile accessProfile, DbClient dbClient, PartitionManager partitionManager) throws Exception {
this.partitionManager = partitionManager;
log.info("Started discovery of UnManagedVolumes for system {}", accessProfile.getSystemId());
ProtectionSystem protectionSystem = dbClient.queryObject(ProtectionSystem.class, accessProfile.getSystemId());
if (protectionSystem == null) {
log.error("Discovery is not run! Protection System not found: " + accessProfile.getSystemId());
return;
}
RecoverPointClient rp = RPHelper.getRecoverPointClient(protectionSystem);
unManagedCGsInsert = new ArrayList<UnManagedProtectionSet>();
unManagedCGsUpdate = new ArrayList<UnManagedProtectionSet>();
unManagedVolumesToDelete = new ArrayList<UnManagedVolume>();
unManagedVolumesToUpdateByWwn = new HashMap<String, UnManagedVolume>();
unManagedCGsReturnedFromProvider = new HashSet<URI>();
// Get all of the consistency groups (and their volumes) from RP
Set<GetCGsResponse> cgs = rp.getAllCGs();
if (cgs == null) {
log.warn("No CGs were found on protection system: " + protectionSystem.getLabel());
return;
}
// This section of code allows us to cache XIO native GUIDs to work around an issue
// with RP's understanding of XIO volume WWNs (128-bit) and the rest of the world's
// understanding of the XIO volume WWN once it's exported (64-bit).
Map<String, String> rpWwnToNativeWwn = new HashMap<String, String>();
List<URI> storageSystemIds = dbClient.queryByType(StorageSystem.class, true);
List<String> storageNativeIdPrefixes = new ArrayList<String>();
if (storageSystemIds != null) {
Iterator<StorageSystem> storageSystemsItr = dbClient.queryIterativeObjects(StorageSystem.class, storageSystemIds);
while (storageSystemsItr.hasNext()) {
StorageSystem storageSystem = storageSystemsItr.next();
if (storageSystem.getSystemType().equalsIgnoreCase(Type.xtremio.name())) {
storageNativeIdPrefixes.add(storageSystem.getNativeGuid());
}
}
}
for (GetCGsResponse cg : cgs) {
try {
log.info("Processing returned CG: " + cg.getCgName());
boolean newCG = false;
// UnManagedProtectionSet native GUID is protection system GUID + consistency group ID
String nativeGuid = protectionSystem.getNativeGuid() + Constants.PLUS + cg.getCgId();
// First check to see if this protection set is already part of our managed DB
if (null != DiscoveryUtils.checkProtectionSetExistsInDB(dbClient, nativeGuid)) {
log.info("Protection Set " + nativeGuid + " already is managed by ViPR, skipping unmanaged discovery");
continue;
}
// Now check to see if the unmanaged CG exists in the database
UnManagedProtectionSet unManagedProtectionSet = DiscoveryUtils.checkUnManagedProtectionSetExistsInDB(dbClient, nativeGuid);
if (null == unManagedProtectionSet) {
log.info("Creating new unmanaged protection set for CG: " + cg.getCgName());
unManagedProtectionSet = new UnManagedProtectionSet();
unManagedProtectionSet.setId(URIUtil.createId(UnManagedProtectionSet.class));
unManagedProtectionSet.setNativeGuid(nativeGuid);
unManagedProtectionSet.setProtectionSystemUri(protectionSystem.getId());
StringSet protectionId = new StringSet();
protectionId.add("" + cg.getCgId());
unManagedProtectionSet.putCGInfo(SupportedCGInformation.PROTECTION_ID.toString(), protectionId);
// Default MP to false until proven otherwise
unManagedProtectionSet.getCGCharacteristics().put(UnManagedProtectionSet.SupportedCGCharacteristics.IS_MP.name(), Boolean.FALSE.toString());
newCG = true;
} else {
log.info("Found existing unmanaged protection set for CG: " + cg.getCgName() + ", using " + unManagedProtectionSet.getId().toString());
}
unManagedCGsReturnedFromProvider.add(unManagedProtectionSet.getId());
// Update the fields for the CG
unManagedProtectionSet.setCgName(cg.getCgName());
unManagedProtectionSet.setLabel(cg.getCgName());
// Indicate whether the CG is in a healthy state or not to ingest.
unManagedProtectionSet.getCGCharacteristics().put(UnManagedProtectionSet.SupportedCGCharacteristics.IS_HEALTHY.name(), cg.getCgState().equals(GetCGStateResponse.HEALTHY) ? Boolean.TRUE.toString() : Boolean.FALSE.toString());
// Indicate whether the CG is sync or async
unManagedProtectionSet.getCGCharacteristics().put(UnManagedProtectionSet.SupportedCGCharacteristics.IS_SYNC.name(), cg.getCgPolicy().synchronous ? Boolean.TRUE.toString() : Boolean.FALSE.toString());
// Fill in RPO type and value information
StringSet rpoType = new StringSet();
rpoType.add(cg.getCgPolicy().rpoType);
unManagedProtectionSet.putCGInfo(SupportedCGInformation.RPO_TYPE.toString(), rpoType);
StringSet rpoValue = new StringSet();
rpoValue.add(cg.getCgPolicy().rpoValue.toString());
unManagedProtectionSet.putCGInfo(SupportedCGInformation.RPO_VALUE.toString(), rpoValue);
if (null == cg.getCopies()) {
log.info("Protection Set " + nativeGuid + " does not contain any copies. Skipping...");
continue;
}
if (null == cg.getRsets()) {
log.info("Protection Set " + nativeGuid + " does not contain any replication sets. Skipping...");
continue;
}
// Clean up the existing journal and replication set info in the unmanaged protection set, so that updated info is populated
if (!newCG) {
cleanUpUnManagedResources(unManagedProtectionSet, unManagedVolumesToUpdateByWwn, dbClient);
}
// Now map UnManagedVolume objects to the journal and rset (sources/targets) and put RP fields in them
Map<String, String> rpCopyAccessStateMap = new HashMap<String, String>();
mapCgJournals(unManagedProtectionSet, cg, rpCopyAccessStateMap, rpWwnToNativeWwn, storageNativeIdPrefixes, dbClient);
mapCgSourceAndTargets(unManagedProtectionSet, cg, rpCopyAccessStateMap, rpWwnToNativeWwn, storageNativeIdPrefixes, dbClient);
if (newCG) {
unManagedCGsInsert.add(unManagedProtectionSet);
} else {
unManagedCGsUpdate.add(unManagedProtectionSet);
}
} catch (Exception ex) {
log.error("Error processing RP CG {}", cg.getCgName(), ex);
}
}
handlePersistence(dbClient, false);
cleanUp(protectionSystem, dbClient);
}
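The cleanUp call at the end uses unManagedCGsReturnedFromProvider to weed out UnManagedProtectionSets that the protection system no longer reports. A rough sketch of that idea is shown below; it is not the actual CoprHD implementation, and the getProtectionSystemUri accessor and the collection overload of markForDeletion are assumptions.
// Hypothetical sketch of the stale-CG cleanup idea behind cleanUp(...): any active
// UnManagedProtectionSet that belongs to this protection system but was not returned
// by the provider during this discovery cycle is marked for deletion.
private void cleanUpStaleCGs(ProtectionSystem protectionSystem, DbClient dbClient) {
    List<URI> umpsetIds = dbClient.queryByType(UnManagedProtectionSet.class, true);
    List<UnManagedProtectionSet> staleSets = new ArrayList<UnManagedProtectionSet>();
    Iterator<UnManagedProtectionSet> umpsetItr = dbClient.queryIterativeObjects(UnManagedProtectionSet.class, umpsetIds);
    while (umpsetItr.hasNext()) {
        UnManagedProtectionSet umpset = umpsetItr.next();
        if (protectionSystem.getId().equals(umpset.getProtectionSystemUri())
                && !unManagedCGsReturnedFromProvider.contains(umpset.getId())) {
            staleSets.add(umpset);
        }
    }
    if (!staleSets.isEmpty()) {
        // Mark the stale unmanaged protection sets inactive so they are no longer offered for ingestion.
        dbClient.markForDeletion(staleSets);
    }
}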