Use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.
Class BlockRecoverPointIngestOrchestrator, method performRPExportIngestion.
/**
 * RecoverPoint volumes are expected to have export masks where the volume is exported to
 * a RecoverPoint site. Therefore every RP volume (sources, targets, journals) will need to
 * go through this code and have their export mask ingested. Even if the mask has already been
 * ingested by a previous volume ingestion, this method still needs to update the ExportGroup and
 * ExportMask objects to reflect the newly ingested volume as part of its management.
 *
 * @param parentRequestContext the parent IngestionRequestContext for this ingestion run
 * @param volumeContext the RecoverPointVolumeIngestionContext for the volume currently being ingested
 * @param unManagedVolume unmanaged volume
 * @param volume managed volume
 */
private void performRPExportIngestion(IngestionRequestContext parentRequestContext, RecoverPointVolumeIngestionContext volumeContext, UnManagedVolume unManagedVolume, Volume volume) {
_logger.info("starting RecoverPoint export ingestion for volume {}", volume.forDisplay());
Project project = volumeContext.getProject();
ProtectionSystem protectionSystem = _dbClient.queryObject(ProtectionSystem.class, volume.getProtectionController());
StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, volume.getStorageController());
List<UnManagedExportMask> unManagedRPExportMasks = findUnManagedRPExportMask(protectionSystem, unManagedVolume);
// every RP volume must be exported to an RP site, so having no masks is a hard failure
if (unManagedRPExportMasks.isEmpty()) {
_logger.error("Could not find any unmanaged export masks associated with volume: " + unManagedVolume.getLabel());
throw IngestionException.exceptions.noUnManagedExportMaskFound(unManagedVolume.getNativeGuid());
}
// Keep a map of internal site name to virtual array
Map<String, VirtualArray> internalSiteToVarrayMap = new HashMap<String, VirtualArray>();
internalSiteToVarrayMap.put(volume.getInternalSiteName(), volumeContext.getVarray(unManagedVolume));
// If this is a MetroPoint volume we're going to have multiple ExportMasks/ExportGroups to deal with.
// We'll need to query the backend volumes for extra info to populate internalSiteToVarrayMap so
// we can properly line up the ExportMasks/ExportGroups.
boolean metropoint = RPHelper.isMetroPointVolume(_dbClient, volume);
if (metropoint) {
// We need the VPLEX ingest context to get the backend volume info
VplexVolumeIngestionContext vplexVolumeContext = ((RpVplexVolumeIngestionContext) volumeContext.getVolumeContext()).getVplexVolumeIngestionContext();
for (String associatedVolumeIdStr : vplexVolumeContext.getAssociatedVolumeIds(volume)) {
// Find the associated volumes using the context maps or the db if they are already there
Volume associatedVolume = VolumeIngestionUtil.findVolume(_dbClient, vplexVolumeContext.getBlockObjectsToBeCreatedMap(), vplexVolumeContext.getDataObjectsToBeUpdatedMap(), associatedVolumeIdStr);
String internalSiteName = associatedVolume.getInternalSiteName();
// If we don't already have an entry for this internal site name, let's add it now.
if (!internalSiteToVarrayMap.containsKey(internalSiteName)) {
internalSiteToVarrayMap.put(internalSiteName, _dbClient.queryObject(VirtualArray.class, associatedVolume.getVirtualArray()));
}
}
}
// iterate each internal site entry; typically only one, but MetroPoint yields two
for (Entry<String, VirtualArray> entry : internalSiteToVarrayMap.entrySet()) {
String internalSiteName = entry.getKey();
VirtualArray virtualArray = entry.getValue();
UnManagedExportMask em = null;
if (metropoint) {
// Since we're flagged for MetroPoint we need to determine which ExportMask to use.
// We need the MetroPoint volume to be added to BOTH ExportGroups that represent the
// two Storage Views on VPLEX for cluster-1 and cluster-2.
// So let's use the varray to find the cluster we're looking for on this pass and match
// it to the maskingViewPath of the UnManagedExportMask.
// This should line things up roughly as:
// VPLEX Storage View 1 -> VPLEX Cluster1 + RPA1
// VPLEX Storage View 2 -> VPLEX Cluster2 + RPA2
String vplexCluster = ConnectivityUtil.getVplexClusterForVarray(virtualArray.getId(), storageSystem.getId(), _dbClient);
// First try and match based on UnManagedExportMask ports
for (UnManagedExportMask exportMask : unManagedRPExportMasks) {
for (String portUri : exportMask.getKnownStoragePortUris()) {
StoragePort port = _dbClient.queryObject(StoragePort.class, URI.create(portUri));
if (port != null && !port.getInactive()) {
String vplexClusterForMask = ConnectivityUtil.getVplexClusterOfPort(port);
if (vplexCluster.equals(vplexClusterForMask)) {
em = exportMask;
break;
}
}
}
// a mask matched on port cluster; stop scanning the remaining masks
if (em != null) {
break;
}
}
if (em == null) {
// Fallback: no port-based match; try matching the cluster name embedded
// in the masking view path. It really shouldn't come to this, but leaving
// this code just in case.
for (UnManagedExportMask exportMask : unManagedRPExportMasks) {
if (exportMask.getMaskingViewPath().contains("cluster-" + vplexCluster)) {
em = exportMask;
break;
}
}
}
} else {
// non-MetroPoint: a single RP export mask is expected, use the first one found
em = unManagedRPExportMasks.get(0);
}
// If the mask for ingested volume is in a mask that contains JOURNAL keyword, make sure the ExportGroup created contains
// that internal flag.
// NOTE(review): assumes em and em.getMaskName() are non-null at this point — confirm for all RP masks
boolean isJournalExport = false;
if (em.getMaskName().toLowerCase().contains(VolumeIngestionUtil.RP_JOURNAL)) {
isJournalExport = true;
}
String exportGroupGeneratedName = RPHelper.generateExportGroupName(protectionSystem, storageSystem, internalSiteName, virtualArray, isJournalExport);
// reuse an ExportGroup already known to this ingestion request, if any
ExportGroup exportGroup = VolumeIngestionUtil.verifyExportGroupExists(parentRequestContext, exportGroupGeneratedName, project.getId(), em.getKnownInitiatorUris(), virtualArray.getId(), _dbClient);
// track creation so commit() knows whether to create or update the ExportGroup
boolean exportGroupCreated = false;
if (null == exportGroup) {
exportGroupCreated = true;
Integer numPaths = em.getZoningMap().size();
_logger.info("Creating Export Group with label {}", em.getMaskName());
exportGroup = RPHelper.createRPExportGroup(exportGroupGeneratedName, virtualArray, project, numPaths, isJournalExport);
}
if (null != exportGroup) {
// check if the ExportGroup has already been fetched
ExportGroup loadedExportGroup = parentRequestContext.findExportGroup(exportGroup.getLabel(), project.getId(), virtualArray.getId(), null, null);
if (null != loadedExportGroup) {
exportGroup = loadedExportGroup;
}
}
volumeContext.setExportGroup(exportGroup);
volumeContext.setExportGroupCreated(exportGroupCreated);
volumeContext.getRpExportGroupMap().put(exportGroup, exportGroupCreated);
// set RP device initiators to be used as the "host" for export mask ingestion
List<Initiator> initiators = new ArrayList<Initiator>();
Iterator<Initiator> initiatorItr = _dbClient.queryIterativeObjects(Initiator.class, URIUtil.toURIList(em.getKnownInitiatorUris()));
while (initiatorItr.hasNext()) {
initiators.add(initiatorItr.next());
}
volumeContext.setDeviceInitiators(initiators);
// find the ingest export strategy and call into for this unmanaged export mask
IngestExportStrategy ingestStrategy = ingestStrategyFactory.buildIngestExportStrategy(unManagedVolume);
volume = ingestStrategy.ingestExportMasks(unManagedVolume, volume, volumeContext);
if (null == volume) {
// export mask ingestion did not succeed; surface a general ingestion exception
throw IngestionException.exceptions.generalVolumeException(unManagedVolume.getLabel(), "check the logs for more details");
}
}
}
Use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.
Class BlockVplexVolumeIngestOrchestrator, method findOrCreateExportGroup.
/**
 * Find or Create an ExportGroup.
 *
 * Lookup order:
 *   1. the request context's already-loaded ExportGroups (by generated name);
 *   2. the database, by exact label match (last match wins if several);
 *   3. existing ExportGroups on the given initiators that match varray, project,
 *      and tenant and contain all of the initiators.
 * Only when all three miss is a new (not yet persisted) ExportGroup built.
 *
 * @param requestContext the ingestion request context (supplies cached ExportGroups)
 * @param array -- Array StorageSystem
 * @param initiators -- Collection<Initiator> representing VPLEX back-end ports.
 * @param virtualArrayURI the virtual array for the ExportGroup
 * @param projectURI the project for the ExportGroup
 * @param tenantURI the tenant for the ExportGroup
 * @param numPaths Value of maxPaths to be put in ExportGroup
 * @param unmanagedExportMask the unmanaged export mask
 * @return existing or newly created ExportGroup (not yet persisted)
 */
private ExportGroup findOrCreateExportGroup(IngestionRequestContext requestContext, StorageSystem array, Collection<Initiator> initiators, URI virtualArrayURI, URI projectURI, URI tenantURI, int numPaths, UnManagedExportMask unmanagedExportMask) {
    // group name is derived from the mask name plus a short array identifier:
    // the system type with "block" stripped, followed by the last 4 chars of the serial number
    String arrayName = array.getSystemType().replace("block", "") + array.getSerialNumber().substring(array.getSerialNumber().length() - 4);
    String groupName = unmanagedExportMask.getMaskName() + "_" + arrayName;
    ExportGroup exportGroup = requestContext.findExportGroup(groupName, projectURI, virtualArrayURI, null, null);
    if (null != exportGroup) {
        _logger.info(String.format("Returning existing ExportGroup %s", exportGroup.getLabel()));
        return exportGroup;
    }
    List<ExportGroup> exportGroups = CustomQueryUtility.queryActiveResourcesByConstraint(_dbClient, ExportGroup.class, PrefixConstraint.Factory.getFullMatchConstraint(ExportGroup.class, "label", groupName));
    if (null != exportGroups && !exportGroups.isEmpty()) {
        // keep the original "last non-null match wins" behavior
        for (ExportGroup group : exportGroups) {
            if (null != group) {
                _logger.info(String.format("Returning existing ExportGroup %s", group.getLabel()));
                exportGroup = group;
            }
        }
    } else {
        Map<String, ExportGroup> possibleExportGroups = new HashMap<String, ExportGroup>();
        Set<String> initiatorUris = new HashSet<String>();
        for (Initiator initiator : initiators) {
            // Determine all the possible existing Export Groups
            List<ExportGroup> groups = ExportUtils.getInitiatorExportGroups(initiator, _dbClient);
            for (ExportGroup group : groups) {
                if (!possibleExportGroups.containsKey(group.getId().toString())) {
                    possibleExportGroups.put(group.getId().toString(), group);
                }
            }
            initiatorUris.add(initiator.getId().toString());
        }
        // If there are possible Export Groups, look for one with that matches on inits, varray, project, and tenant.
        for (ExportGroup group : possibleExportGroups.values()) {
            if (URIUtil.identical(group.getVirtualArray(), virtualArrayURI) && URIUtil.identical(group.getProject().getURI(), projectURI) && URIUtil.identical(group.getTenant().getURI(), tenantURI)) {
                if (group.getInitiators().containsAll(initiatorUris)) {
                    _logger.info(String.format("Returning existing ExportGroup %s from database.", group.getLabel()));
                    return group;
                }
            }
        }
        // No existing group has the mask, let's create one.
        exportGroup = new ExportGroup();
        exportGroup.setLabel(groupName);
        exportGroup.setProject(new NamedURI(projectURI, exportGroup.getLabel()));
        exportGroup.setTenant(new NamedURI(tenantURI, exportGroup.getLabel()));
        exportGroup.setGeneratedName(groupName);
        exportGroup.setVolumes(new StringMap());
        exportGroup.setOpStatus(new OpStatusMap());
        // set the virtual array exactly once; previously it was first set to the
        // VPLEX system's varray and immediately overwritten with virtualArrayURI
        // (a dead store), which also made the vplex local variable unnecessary
        exportGroup.setVirtualArray(virtualArrayURI);
        exportGroup.setNumPaths(numPaths);
        // Add the initiators into the ExportGroup.
        for (Initiator initiator : initiators) {
            exportGroup.addInitiator(initiator);
        }
        _logger.info(String.format("Returning new ExportGroup %s", exportGroup.getLabel()));
    }
    return exportGroup;
}
Use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.
Class RecoverPointVolumeIngestionContext, method commit.
/*
 * (non-Javadoc)
 *
 * @see com.emc.storageos.api.service.impl.resource.blockingestorchestration.context.VolumeIngestionContext#commit()
 */
@Override
public void commit() {
_logger.info("persisting RecoverPoint backend for volume " + getUnmanagedVolume().forDisplay());
// commit the basic IngestionRequestContext collections
for (BlockObject bo : getObjectsIngestedByExportProcessing()) {
_logger.info("Creating BlockObject {} (hash {})", bo.forDisplay(), bo.hashCode());
_dbClient.createObject(bo);
}
for (BlockObject bo : getBlockObjectsToBeCreatedMap().values()) {
_logger.info("Creating BlockObject {} (hash {})", bo.forDisplay(), bo.hashCode());
_dbClient.createObject(bo);
}
for (Set<DataObject> createdObjects : getDataObjectsToBeCreatedMap().values()) {
if (createdObjects != null && !createdObjects.isEmpty()) {
for (DataObject dob : createdObjects) {
_logger.info("Creating DataObject {} (hash {})", dob.forDisplay(), dob.hashCode());
_dbClient.createObject(dob);
}
}
}
for (Set<DataObject> updatedObjects : getDataObjectsToBeUpdatedMap().values()) {
if (updatedObjects != null && !updatedObjects.isEmpty()) {
for (DataObject dob : updatedObjects) {
// an inactive object is logically deleted; the same updateObject call persists both cases
if (dob.getInactive()) {
_logger.info("Deleting DataObject {} (hash {})", dob.forDisplay(), dob.hashCode());
} else {
_logger.info("Updating DataObject {} (hash {})", dob.forDisplay(), dob.hashCode());
}
_dbClient.updateObject(dob);
}
}
}
for (UnManagedVolume umv : getUnManagedVolumesToBeDeleted()) {
_logger.info("Deleting UnManagedVolume {} (hash {})", umv.forDisplay(), umv.hashCode());
// NOTE(review): deletion is effected via updateObject — presumably the
// UnManagedVolume has already been flagged inactive upstream; confirm
_dbClient.updateObject(umv);
}
// now commit the RecoverPoint specific data
if (_managedSourceVolumesToUpdate != null) {
_logger.info("Updating RP Source Volumes: " + _managedSourceVolumesToUpdate);
_dbClient.updateObject(_managedSourceVolumesToUpdate);
}
if (_unmanagedSourceVolumesToUpdate != null) {
_logger.info("Updating RP Source UnManagedVolumes: " + _unmanagedSourceVolumesToUpdate);
_dbClient.updateObject(_unmanagedSourceVolumesToUpdate);
}
if (_unmanagedTargetVolumesToUpdate != null) {
_logger.info("Updating RP Target UnManagedVolumes: " + _unmanagedTargetVolumesToUpdate);
_dbClient.updateObject(_unmanagedTargetVolumesToUpdate);
}
// commit the ProtectionSet, if created, and remove the UnManagedProtectionSet
ProtectionSet managedProtectionSet = getManagedProtectionSet();
if (null != managedProtectionSet) {
// include the volume currently being ingested in the ProtectionSet's volume list
if (getManagedBlockObject() != null) {
managedProtectionSet.getVolumes().add(_managedBlockObject.getId().toString());
}
_logger.info("Creating ProtectionSet {} (hash {})", managedProtectionSet.forDisplay(), managedProtectionSet.hashCode());
_dbClient.createObject(managedProtectionSet);
// the protection set was created, so delete the unmanaged one
_logger.info("Deleting UnManagedProtectionSet {} (hash {})", _unManagedProtectionSet.forDisplay(), _unManagedProtectionSet.hashCode());
_dbClient.removeObject(_unManagedProtectionSet);
}
// commit the BlockConsistencyGroup, if created
if (null != getManagedBlockConsistencyGroup()) {
_logger.info("Creating BlockConsistencyGroup {} (hash {})", _managedBlockConsistencyGroup.forDisplay(), _managedBlockConsistencyGroup.hashCode());
_dbClient.createObject(_managedBlockConsistencyGroup);
}
// persist each RP ExportGroup; the map value records whether this ingestion created it
for (Entry<ExportGroup, Boolean> entry : getRpExportGroupMap().entrySet()) {
ExportGroup exportGroup = entry.getKey();
boolean exportGroupIsCreated = entry.getValue();
if (exportGroupIsCreated) {
_logger.info("Creating ExportGroup {} (hash {})", exportGroup.forDisplay(), exportGroup.hashCode());
_dbClient.createObject(exportGroup);
} else {
_logger.info("Updating ExportGroup {} (hash {})", exportGroup.forDisplay(), exportGroup.hashCode());
_dbClient.updateObject(exportGroup);
}
}
// finally let the base class commit its own collections
super.commit();
}
Use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.
Class IngestVolumesExportedSchedulingThread, method run.
/**
 * Runs the exported-volume ingestion workflow: ingests each requested
 * UnManagedVolume's block object, then ingests the associated export masks,
 * persists all created/updated objects, records events, and cleans up any
 * empty ExportGroup this request created.
 */
@Override
public void run() {
try {
_requestContext.reset();
URI varrayId = null;
while (_requestContext.hasNext()) {
UnManagedVolume unManagedVolume = _requestContext.next();
_logger.info("Ingestion starting for exported unmanaged volume {}", unManagedVolume.getNativeGuid());
// capture the varray of the first volume processed
if (null == varrayId) {
varrayId = _requestContext.getVarray(unManagedVolume).getId();
}
TaskResourceRep resourceRep = _taskMap.get(unManagedVolume.getId().toString());
String taskId = resourceRep != null ? resourceRep.getOpId() : null;
try {
URI storageSystemUri = unManagedVolume.getStorageSystemUri();
// cache StorageSystem lookups across volumes in this request
StorageSystem system = _requestContext.getStorageSystemCache().get(storageSystemUri.toString());
if (null == system) {
system = _dbClient.queryObject(StorageSystem.class, storageSystemUri);
_requestContext.getStorageSystemCache().put(storageSystemUri.toString(), system);
}
// Build the Strategy , which contains reference to Block object & export orchestrators
IngestStrategy ingestStrategy = _ingestStrategyFactory.buildIngestStrategy(unManagedVolume, !IngestStrategyFactory.DISREGARD_PROTECTION);
@SuppressWarnings("unchecked") BlockObject blockObject = ingestStrategy.ingestBlockObjects(_requestContext, VolumeIngestionUtil.getBlockObjectClass(unManagedVolume));
if (null == blockObject) {
throw IngestionException.exceptions.generalVolumeException(unManagedVolume.getLabel(), "check the logs for more details");
}
_requestContext.getBlockObjectsToBeCreatedMap().put(blockObject.getNativeGuid(), blockObject);
_requestContext.getProcessedUnManagedVolumeMap().put(unManagedVolume.getNativeGuid(), _requestContext.getVolumeContext());
_logger.info("Volume ingestion completed successfully for exported unmanaged volume {} (export ingestion will follow)", unManagedVolume.getNativeGuid());
} catch (APIException ex) {
// per-volume failure: record the task error and roll back this volume's context only
_logger.error("error: " + ex.getLocalizedMessage(), ex);
_dbClient.error(UnManagedVolume.class, _requestContext.getCurrentUnManagedVolumeUri(), taskId, ex);
_requestContext.getVolumeContext().rollback();
} catch (Exception ex) {
_logger.error("error: " + ex.getLocalizedMessage(), ex);
_dbClient.error(UnManagedVolume.class, _requestContext.getCurrentUnManagedVolumeUri(), taskId, IngestionException.exceptions.generalVolumeException(unManagedVolume.getLabel(), ex.getLocalizedMessage()));
_requestContext.getVolumeContext().rollback();
}
}
_logger.info("Ingestion of all the unmanaged volumes has completed.");
// next ingest the export masks for the unmanaged volumes which have been fully ingested
_logger.info("Ingestion of unmanaged export masks for all requested volumes starting.");
ingestBlockExportMasks(_requestContext, _taskMap);
for (VolumeIngestionContext volumeContext : _requestContext.getProcessedUnManagedVolumeMap().values()) {
// If there is a CG involved in the ingestion, organize, pollenate, and commit.
_unManagedVolumeService.commitIngestedCG(_requestContext, volumeContext.getUnmanagedVolume());
// commit the volume itself
volumeContext.commit();
}
for (BlockObject bo : _requestContext.getObjectsIngestedByExportProcessing()) {
_logger.info("Ingestion Wrap Up: Creating BlockObject {} (hash {})", bo.forDisplay(), bo.hashCode());
_dbClient.createObject(bo);
}
for (UnManagedVolume umv : _requestContext.getUnManagedVolumesToBeDeleted()) {
_logger.info("Ingestion Wrap Up: Deleting UnManagedVolume {} (hash {})", umv.forDisplay(), umv.hashCode());
// NOTE(review): deletion is effected via updateObject — presumably the
// UnManagedVolume is already flagged inactive; confirm upstream
_dbClient.updateObject(umv);
}
// Update the related objects if any after successful export mask ingestion
for (Entry<String, Set<DataObject>> updatedObjectsEntry : _requestContext.getDataObjectsToBeUpdatedMap().entrySet()) {
if (updatedObjectsEntry != null) {
_logger.info("Ingestion Wrap Up: Updating objects for UnManagedVolume URI " + updatedObjectsEntry.getKey());
for (DataObject dob : updatedObjectsEntry.getValue()) {
// inactive objects are logically deleted; the same updateObject call persists both
if (dob.getInactive()) {
_logger.info("Ingestion Wrap Up: Deleting DataObject {} (hash {})", dob.forDisplay(), dob.hashCode());
} else {
_logger.info("Ingestion Wrap Up: Updating DataObject {} (hash {})", dob.forDisplay(), dob.hashCode());
}
_dbClient.updateObject(dob);
}
}
}
// Create the related objects if any after successful export mask ingestion
for (Set<DataObject> createdObjects : _requestContext.getDataObjectsToBeCreatedMap().values()) {
if (createdObjects != null && !createdObjects.isEmpty()) {
for (DataObject dob : createdObjects) {
_logger.info("Ingestion Wrap Up: Creating DataObject {} (hash {})", dob.forDisplay(), dob.hashCode());
_dbClient.createObject(dob);
}
}
}
// persist the request-level ExportGroup: created by this request vs. pre-existing
ExportGroup exportGroup = _requestContext.getExportGroup();
if (_requestContext.isExportGroupCreated()) {
_logger.info("Ingestion Wrap Up: Creating ExportGroup {} (hash {})", exportGroup.forDisplay(), exportGroup.hashCode());
_dbClient.createObject(exportGroup);
} else {
_logger.info("Ingestion Wrap Up: Updating ExportGroup {} (hash {})", exportGroup.forDisplay(), exportGroup.hashCode());
_dbClient.updateObject(exportGroup);
}
// record the events after they have been persisted
for (BlockObject volume : _requestContext.getObjectsIngestedByExportProcessing()) {
_unManagedVolumeService.recordVolumeOperation(_dbClient, _unManagedVolumeService.getOpByBlockObjectType(volume), Status.ready, volume.getId());
}
} catch (InternalException e) {
_logger.error("InternalException occurred due to: {}", e);
throw e;
} catch (Exception e) {
_logger.error("Unexpected exception occurred due to: {}", e);
throw APIException.internalServerErrors.genericApisvcError(ExceptionUtils.getExceptionMessage(e), e);
} finally {
// if an export group was created for this request but no volumes were ingested into
// it, then we should clean it up in the database (CTRL-8520)
if ((null != _requestContext) && _requestContext.isExportGroupCreated() && _requestContext.getObjectsIngestedByExportProcessing().isEmpty()) {
_logger.info("Ingestion Wrap Up: an export group was created, but no volumes were ingested into it");
if (_requestContext.getExportGroup().getVolumes() == null || _requestContext.getExportGroup().getVolumes().isEmpty()) {
_logger.info("Ingestion Wrap Up: since no volumes are present, marking {} for deletion", _requestContext.getExportGroup().getLabel());
_dbClient.markForDeletion(_requestContext.getExportGroup());
}
}
}
}
Use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.
Class MaskPerHostIngestOrchestrator, method getExportMaskAlreadyIngested.
/**
 * Locates an already-ingested ExportMask for the given unmanaged mask.
 * maskPerHost mode guarantees that the initiators appear in exactly one
 * export mask, so the first qualifying mask found is the answer.
 *
 * A mask qualifies when it belongs to the same storage system as the
 * unmanaged mask AND is already a member of the current request's
 * ExportGroup (COP-36231: this disambiguates cluster vs. exclusive masks
 * when both exist for the same initiator).
 *
 * @param mask the UnManagedExportMask being ingested
 * @param requestContext the current ingestion request context
 * @param dbClient database client (unused; the member _dbClient is used)
 * @return the matching ingested ExportMask, or null if none qualifies
 */
@Override
protected ExportMask getExportMaskAlreadyIngested(UnManagedExportMask mask, IngestionRequestContext requestContext, DbClient dbClient) {
    ExportMask ingestedMask = null;
    List<URI> initiatorUris = new ArrayList<URI>(Collections2.transform(mask.getKnownInitiatorUris(), CommonTransformerFunctions.FCTN_STRING_TO_URI));
    initiatorScan:
    for (URI initiatorUri : initiatorUris) {
        List<URI> candidateMaskUris = _dbClient.queryByConstraint(AlternateIdConstraint.Factory.getExportMaskInitiatorConstraint(initiatorUri.toString()));
        if (null == candidateMaskUris) {
            // index query yielded nothing at all; no ingested mask can exist
            return ingestedMask;
        }
        for (URI candidateUri : candidateMaskUris) {
            ExportMask candidate = _dbClient.queryObject(ExportMask.class, candidateUri);
            boolean sameStorageSystem = candidate.getStorageDevice() != null && candidate.getStorageDevice().equals(mask.getStorageSystemUri());
            if (!sameStorageSystem) {
                _logger.info("Found Mask {} with matching initiator and unmatched Storage System. Skipping mask", candidateUri);
                continue;
            }
            ExportGroup eg = requestContext.getExportGroup();
            boolean memberOfRequestGroup = null != eg && null != eg.getExportMasks() && eg.getExportMasks().contains(candidateUri.toString());
            if (memberOfRequestGroup) {
                // COP-36231: Check whether we are ingesting into existing EG's. This will make sure to pick the right ExportMask
                // when there are cluster & exclusive EMs exists.
                _logger.info("Found Mask {} in the export Group {}", candidateUri, eg.getId());
                ingestedMask = candidate;
                break initiatorScan;
            }
            _logger.info("Found Mask {} with matching initiator and unmatched Storage System & EG. Skipping mask", candidateUri);
        }
    }
    return ingestedMask;
}
Aggregations