Use of com.emc.storageos.db.client.model.StoragePort in project coprhd-controller by CoprHD.
The class NetworkDeviceController, method refreshZoningMap.
/**
 * Update the zoning map for an export mask previously "accepted". This applies to
 * brownfield scenarios where an export mask was found on the storage array. For
 * those export masks, changes outside of the application are expected and the
 * application should get the latest state before making any changes. This
 * function is called from ExportMaskOperations#refreshZoneMap after all
 * updates to the initiators, ports and volumes were made into the export mask and
 * the export group. The update steps are as follows:
 * <ol>
 * <li>Get the current zones for those initiators that were not added by ViPR and the storage ports that exist in the mask.</li>
 * <li>Diff the current zones with those in the export mask and update the zoning map.</li>
 * <li>Update the FCZoneReferences to match the zone updates.</li>
 * </ol>
 * Note that ViPR keeps FCZoneReferences only for volumes created by ViPR. As externally
 * created volumes are not updated by ExportMaskOperations#refreshZoneMap, no additional code
 * is needed to remove FCZoneReferences for removed volumes.
 *
 * @param exportMask the export mask being updated.
 * @param removedInitiators the list of initiators that were removed. This is needed because
 *            these were removed from the zoningMap by {@link ExportMask#removeInitiators(Collection)}
 * @param removedPorts the set of storage ports that were removed
 * @param maskUpdated a flag that indicates whether an update was made to the mask that requires
 *            a zoning refresh
 * @param persist a boolean that indicates whether the changes should be persisted to the db
 */
public void refreshZoningMap(ExportMask exportMask, Collection<String> removedInitiators, Collection<String> removedPorts, boolean maskUpdated, boolean persist) {
    try {
        // check if zoning is enabled for the mask
        if (!zoningEnabled(exportMask)) {
            _log.info("Zoning not enabled for export mask {}. Zoning refresh will not be done", exportMask.getMaskName());
            return;
        }
        if (!(maskUpdated || alwaysRefreshZone())) {
            _log.info("The mask ports and initiators were not modified and alwaysRefreshZones is false." + " Zoning refresh will not be done for mask {}", exportMask.getMaskName());
            return;
        }
        List<Initiator> initiators = ExportUtils.getExportMaskInitiators(exportMask, _dbClient);
        _log.info("Refreshing zones for export mask {}. \n\tCurrent initiators " + "in this mask are: {}. \n\tStorage ports in the mask are : {}. \n\tZoningMap is : {}. " + "\n\tRemoved initiators: {}. \n\tRemoved ports: {}", new Object[] { exportMask.getMaskName(), exportMask.getInitiators(), exportMask.getStoragePorts(), exportMask.getZoningMap(), removedInitiators, removedPorts });
        Long start = System.currentTimeMillis();
        // get the current zones in the network system for initiators and ports
        List<StoragePort> storagePorts = ExportUtils.getStoragePorts(exportMask, _dbClient);
        ZoneInfoMap zoneInfoMap = getInitiatorsZoneInfoMap(initiators, storagePorts);
        // Get the full sets of initiators and ports affected. They will be used to find the FCZoneReferences to refresh.
        // These sets include new initiators and ports, existing ones that did not change, as well as removed ones.
        List<StoragePort> allStoragePorts = DataObjectUtils.iteratorToList(_dbClient.queryIterativeObjects(StoragePort.class, StringSetUtil.stringSetToUriList(removedPorts)));
        allStoragePorts.addAll(storagePorts);
        List<Initiator> allInitiators = DataObjectUtils.iteratorToList(_dbClient.queryIterativeObjects(Initiator.class, StringSetUtil.stringSetToUriList(removedInitiators)));
        allInitiators.addAll(initiators);
        // Make a copy of the zoning map - zones have already been removed for removed initiators, put them back.
        // This zoning map will be used to diff the old and new states and to get zone references.
        StringSetMap allZonesMap = new StringSetMap();
        StringSetMap tempMap = exportMask.getZoningMap() == null ? new StringSetMap() : exportMask.getZoningMap();
        for (String key : tempMap.keySet()) {
            // when the zoning map is removed prematurely, this ports set is empty but not null
            if (removedInitiators.contains(key) && (tempMap.get(key) == null || tempMap.get(key).isEmpty())) {
                // this was prematurely cleared, we will assume all ports
                // were zoned to make sure we clean up all FCZoneReferences
                allZonesMap.put(key, new StringSet(removedPorts));
                if (exportMask.getStoragePorts() != null) {
                    allZonesMap.get(key).addAll(exportMask.getStoragePorts());
                }
            } else {
                allZonesMap.put(key, new StringSet(tempMap.get(key)));
            }
        }
        // get all the zone references that exist in the database for this export mask.
        Map<String, List<FCZoneReference>> existingRefs = getZoneReferences(allZonesMap, allInitiators, allStoragePorts);
        // initialize results collections
        List<ZoneInfo> addedZoneInfos = new ArrayList<ZoneInfo>();
        List<ZoneInfo> updatedZoneInfos = new ArrayList<ZoneInfo>();
        List<String> removedZonesKeys = new ArrayList<String>();
        // Compare old and new zones. Initialize some loop variables.
        ZoneInfo zoneInfo = null;
        String initId = null;
        String portId = null;
        if (exportMask.getZoningMap() == null) {
            exportMask.setZoningMap(new StringSetMap());
        }
        for (Entry<String, ZoneInfo> entry : zoneInfoMap.entrySet()) {
            zoneInfo = entry.getValue();
            initId = zoneInfo.getInitiatorId();
            portId = zoneInfo.getPortId();
            if (exportMask.getZoningMap().containsKey(initId) && exportMask.getZoningMap().get(initId).contains(portId)) {
                _log.debug("Zoning between initiator {} and port {} did not change", zoneInfo.getInitiatorWwn(), zoneInfo.getPortWwn());
                // This is accounted for, let's remove it from our diff map
                allZonesMap.remove(initId, portId);
                // add the zone info so that it can be updated for changes like zone name change
                updatedZoneInfos.add(zoneInfo);
            } else {
                _log.info("New zone was found between initiator {} and port {} and will be added", zoneInfo.getInitiatorWwn(), zoneInfo.getPortWwn());
                // sometimes zones have more than one initiator or port
                if (exportMask.hasExistingInitiator(zoneInfo.getInitiatorWwn())) {
                    // This is a new entry, add it to the zoning map
                    exportMask.getZoningMap().put(initId, portId);
                    // add it to the results so that the appropriate FCZoneReferences are added
                    addedZoneInfos.add(zoneInfo);
                }
                // This zone is not expected to be in the diff map, but try anyway
                allZonesMap.remove(initId, portId);
            }
        }
        // Any zones remaining in the diff zoning map were removed in the network system
        Initiator initiator = null;
        StoragePort port = null;
        for (String key : allZonesMap.keySet()) {
            initiator = DataObjectUtils.findInCollection(allInitiators, key);
            if (allZonesMap.get(key) != null && !allZonesMap.get(key).isEmpty()) {
                for (String val : allZonesMap.get(key)) {
                    port = DataObjectUtils.findInCollection(allStoragePorts, val);
                    _log.info("Zone between initiator {} and port {} was removed from the network system" + " or no longer belongs to this mask.", key, val);
                    if (port == null || initiator == null) {
                        // the port or the initiator was removed at some point
                        exportMask.getZoningMap().remove(key, val);
                        _log.info("Removed zoningMap entry between initiator {} and port {} because " + "the port and/or the initiator were removed from the mask", key, val);
                    } else if (removedInitiators.contains(key) || removedPorts.contains(val)) {
                        // the port or the initiator was removed, remove the zone map entry
                        exportMask.getZoningMap().remove(key, val);
                        _log.info("Removed zoningMap entry between initiator {} and port {} because " + "the port and/or the initiator were removed from the mask", initiator.getInitiatorPort(), port.getPortNetworkId());
                    } else if (exportMask.hasExistingInitiator(WWNUtility.getUpperWWNWithNoColons(initiator.getInitiatorPort()))) {
                        exportMask.getZoningMap().remove(key, val);
                        _log.info("Removed zoningMap entry between initiator {} and port {} because " + "this was a brownfield zone for a brownfield initiator", initiator.getInitiatorPort(), port.getPortNetworkId());
                    } else {
                        _log.info("The zone between initiator {} and port {} was removed from " + " the network system but the zoningMap entry will be kept because it was" + " a ViPR initiator-port assignment", initiator.getInitiatorPort(), port.getPortNetworkId());
                    }
                    if (port != null && initiator != null) {
                        removedZonesKeys.add(FCZoneReference.makeEndpointsKey(initiator.getInitiatorPort(), port.getPortNetworkId()));
                    }
                }
            }
        }
        // reconcile the FCZoneReferences in the database with the added, updated, and removed zones
        refreshFCZoneReferences(exportMask, existingRefs, addedZoneInfos, updatedZoneInfos, removedZonesKeys);
        if (persist) {
            _dbClient.updateAndReindexObject(exportMask);
        }
        _log.info("Changed zones for export mask {} to {}. \nRefreshing zones took {} ms", new Object[] { exportMask.getMaskName(), exportMask.getZoningMap(), (System.currentTimeMillis() - start) });
    } catch (Exception ex) {
        _log.error("An exception occurred while updating zoning map for export mask {} with message {}", new Object[] { exportMask.getMaskName(), ex.getMessage() }, ex);
    }
}
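
The heart of refreshZoningMap is steps 1 and 2 of the Javadoc: a set-difference between the zones currently on the network system and the entries in the mask's zoning map. The standalone sketch below illustrates that diff with plain java.util collections standing in for ViPR's StringSetMap and ZoneInfoMap; all names are hypothetical.

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Minimal sketch of the zoning-map diff, assuming both the persisted zoning
// map and the zones reported by the network system are modeled as
// initiator-id -> set of port-ids. Names are illustrative, not ViPR's.
public class ZoningMapDiffSketch {

    public static void main(String[] args) {
        // zoning map as persisted in the export mask
        Map<String, Set<String>> zoningMap = new HashMap<>();
        zoningMap.put("init-1", new HashSet<>(Arrays.asList("port-A", "port-B")));

        // zones currently reported by the network system
        Map<String, Set<String>> currentZones = new HashMap<>();
        currentZones.put("init-1", new HashSet<>(Arrays.asList("port-A", "port-C")));

        Map<String, Set<String>> added = new HashMap<>();
        // start the "removed" diff as a deep copy of the old map
        Map<String, Set<String>> removed = new HashMap<>();
        zoningMap.forEach((k, v) -> removed.put(k, new HashSet<>(v)));

        for (Map.Entry<String, Set<String>> e : currentZones.entrySet()) {
            for (String port : e.getValue()) {
                Set<String> oldPorts = zoningMap.get(e.getKey());
                if (oldPorts != null && oldPorts.contains(port)) {
                    // unchanged zone: drop it from the diff map
                    removed.get(e.getKey()).remove(port);
                } else {
                    // new zone discovered on the network system
                    added.computeIfAbsent(e.getKey(), k -> new HashSet<>()).add(port);
                }
            }
        }
        System.out.println("added:   " + added);   // {init-1=[port-C]}
        System.out.println("removed: " + removed); // {init-1=[port-B]}
    }
}

Entries left in removed correspond to the zones handled by the final loop of the method above; entries in added correspond to addedZoneInfos.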
Use of com.emc.storageos.db.client.model.StoragePort in project coprhd-controller by CoprHD.
The class FileService, method export.
/**
 * Export file system.
 *
 * <p>
 * NOTE: This is an asynchronous operation.
 *
 * @param param
 *            File system export parameters
 * @param id
 *            the URN of a ViPR File system
 * @brief Create file export
 * @return Task resource representation
 * @throws InternalException
 */
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/{id}/exports")
@CheckPermission(roles = { Role.TENANT_ADMIN }, acls = { ACL.OWN, ACL.ALL })
public TaskResourceRep export(@PathParam("id") URI id, FileSystemExportParam param) throws InternalException {
_log.info("Export request recieved {}", id);
// check file System
ArgValidator.checkFieldUriType(id, FileShare.class, "id");
ArgValidator.checkFieldValueFromEnum(param.getPermissions(), "permissions", EnumSet.allOf(FileShareExport.Permissions.class));
_log.info("Export security type {}", param.getSecurityType());
for (String sectype : param.getSecurityType().split(",")) {
ArgValidator.checkFieldValueFromEnum(sectype.trim(), "type", EnumSet.allOf(FileShareExport.SecurityTypes.class));
}
ArgValidator.checkFieldValueFromEnum(param.getProtocol(), "protocol", EnumSet.allOf(StorageProtocol.File.class));
validateIpInterfacesRegistered(param.getEndpoints(), _dbClient);
FileShare fs = queryResource(id);
String task = UUID.randomUUID().toString();
StorageSystem device = _dbClient.queryObject(StorageSystem.class, fs.getStorageDevice());
ArgValidator.checkEntity(fs, id, isIdEmbeddedInURL(id));
// Check for VirtualPool whether it has NFS enabled
VirtualPool vpool = _dbClient.queryObject(VirtualPool.class, fs.getVirtualPool());
if (!vpool.getProtocols().contains(StorageProtocol.File.NFS.name()) && !vpool.getProtocols().contains(StorageProtocol.File.NFSv4.name())) {
// Throw an error
throw APIException.methodNotAllowed.vPoolDoesntSupportProtocol("Vpool doesn't support " + StorageProtocol.File.NFS.name() + " or " + StorageProtocol.File.NFSv4 + " protocol");
}
// locate storage port for exporting file System
StoragePort sport = _fileScheduler.placeFileShareExport(fs, param.getProtocol(), param.getEndpoints());
String path = fs.getPath();
String mountPath = fs.getMountPath();
String subDirectory = param.getSubDirectory();
if (ArgValidator.checkSubDirName("sub_directory", param.getSubDirectory())) {
// Add subdirectory to the path as this is a subdirectory export
path += "/" + param.getSubDirectory();
mountPath += "/" + param.getSubDirectory();
}
FSExportMap exportMap = fs.getFsExports();
if (exportMap != null) {
Iterator it = fs.getFsExports().keySet().iterator();
boolean exportExists = false;
while (it.hasNext()) {
String fsExpKey = (String) it.next();
FileExport fileExport = fs.getFsExports().get(fsExpKey);
if (fileExport.getPath().equalsIgnoreCase(path)) {
exportExists = true;
break;
}
}
if (exportExists) {
throw APIException.badRequests.fileSystemHasExistingExport();
}
}
String rootUserMapping = param.getRootUserMapping();
if (rootUserMapping != null) {
rootUserMapping = rootUserMapping.toLowerCase();
}
// check for bypassDnsCheck flag. If null then set to false
Boolean dnsCheck = param.getBypassDnsCheck();
if (dnsCheck == null) {
dnsCheck = false;
}
FileShareExport export = new FileShareExport(param.getEndpoints(), param.getSecurityType(), param.getPermissions(), rootUserMapping, param.getProtocol(), sport.getPortGroup(), sport.getPortNetworkId(), path, mountPath, subDirectory, param.getComments(), dnsCheck);
_log.info(String.format("FileShareExport --- FileShare id: %1$s, Clients: %2$s, StoragePort: %3$s, SecurityType: %4$s, " + "Permissions: %5$s, Root user mapping: %6$s, Protocol: %7$s, path: %8$s, mountPath: %9$s, SubDirectory: %10$s ,byPassDnsCheck: %11$s", id, export.getClients(), sport.getPortName(), export.getSecurityType(), export.getPermissions(), export.getRootUserMapping(), export.getProtocol(), export.getPath(), export.getMountPath(), export.getSubDirectory(), export.getBypassDnsCheck()));
Operation op = _dbClient.createTaskOpStatus(FileShare.class, fs.getId(), task, ResourceOperationTypeEnum.EXPORT_FILE_SYSTEM);
op.setDescription("Filesystem export");
FileServiceApi fileServiceApi = getFileShareServiceImpl(fs, _dbClient);
fileServiceApi.export(device.getId(), fs.getId(), Arrays.asList(export), task);
auditOp(OperationTypeEnum.EXPORT_FILE_SYSTEM, true, AuditLogManager.AUDITOP_BEGIN, fs.getId().toString(), device.getId().toString(), export.getClients(), param.getSecurityType(), param.getPermissions(), param.getRootUserMapping(), param.getProtocol());
return toTask(fs, task, op);
}
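
Because export is a plain REST endpoint, it can be exercised with any HTTP client. The sketch below uses a JAX-RS 2.x client; the base URL, port, token value, and the XML element names in the body are illustrative assumptions rather than the exact ViPR schema, so consult the ViPR/CoprHD REST reference before relying on them.

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

// Hedged sketch of calling the export endpoint above with a JAX-RS 2.x client.
// The host, port, token, and payload element names are assumptions for
// illustration; the auth header name follows ViPR's X-SDS-AUTH-TOKEN convention.
public class FileExportClientSketch {
    public static void main(String[] args) {
        String fsId = "urn:storageos:FileShare:example"; // hypothetical id
        String body = "<filesystem_export>"              // element names are assumptions
                + "<protocol>NFS</protocol>"
                + "<type>sys</type>"
                + "<permissions>rw</permissions>"
                + "<root_user>nobody</root_user>"
                + "<endpoints><endpoint>10.0.0.5</endpoint></endpoints>"
                + "</filesystem_export>";
        Client client = ClientBuilder.newClient();
        Response response = client.target("https://vipr.example.com:4443")
                .path("/file/filesystems/" + fsId + "/exports")
                .request(MediaType.APPLICATION_XML)
                .header("X-SDS-AUTH-TOKEN", "<token>")
                .post(Entity.entity(body, MediaType.APPLICATION_XML));
        // a successful call returns a task representation for the async operation
        System.out.println(response.getStatus());
        client.close();
    }
}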
Use of com.emc.storageos.db.client.model.StoragePort in project coprhd-controller by CoprHD.
The class FileSnapshotService, method share.
/**
 * Creates an SMB file share.
 * <p>
 * Note: This is an asynchronous operation.
 *
 * @param id
 *            the URN of a ViPR Snapshot
 * @param param
 *            File system share parameters
 * @brief Create file snapshot SMB share
 * @return Task resource representation
 * @throws InternalException
 */
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/{id}/shares")
@CheckPermission(roles = { Role.TENANT_ADMIN }, acls = { ACL.ANY })
public TaskResourceRep share(@PathParam("id") URI id, FileSystemShareParam param) throws InternalException {
    String task = UUID.randomUUID().toString();
    ArgValidator.checkFieldUriType(id, Snapshot.class, "id");
    ArgValidator.checkFieldNotNull(param.getShareName(), "name");
    ArgValidator.checkFieldNotEmpty(param.getShareName(), "name");
    Snapshot snap = queryResource(id);
    FileShare fs = _permissionsHelper.getObjectById(snap.getParent(), FileShare.class);
    StorageSystem device = _dbClient.queryObject(StorageSystem.class, fs.getStorageDevice());
    FileController controller = getController(FileController.class, device.getSystemType());
    ArgValidator.checkEntity(snap, id, isIdEmbeddedInURL(id));
    // Make sure that a share with the same name does not already exist
    String shareName = param.getShareName();
    if (CifsShareUtility.doesShareExist(snap, shareName)) {
        _log.error("CIFS share: {}, already exists", shareName);
        throw APIException.badRequests.duplicateEntityWithField("CIFS share", "name");
    }
    // If no permission value is provided, default to read-only
    if (param.getPermission() == null || param.getPermission().isEmpty()) {
        param.setPermission(FileSMBShare.Permission.read.name());
    }
    if (!param.getPermission().equals(FileSMBShare.Permission.read.name())) {
        throw APIException.badRequests.snapshotSMBSharePermissionReadOnly();
    }
    // Locate a storage port for sharing the snapshot.
    // Select an IP port of the storage array owning the parent file system, belonging to the
    // same varray as the file system. We pass the file system in the call since a file
    // snapshot belongs to the same varray as its parent file system.
    StoragePort sport = _fileScheduler.placeFileShareExport(fs, StorageProtocol.File.CIFS.name(), null);
    // If maxUsers is "unlimited", set it to -1
    if (param.getMaxUsers().equalsIgnoreCase(FileService.UNLIMITED_USERS)) {
        param.setMaxUsers("-1");
    }
    String path = snap.getPath();
    _log.info("Path {}", path);
    _log.info("Param Share Name : {} SubDirectory : {}", param.getShareName(), param.getSubDirectory());
    boolean isSubDirPath = false;
    if (ArgValidator.checkSubDirName("subDirectory", param.getSubDirectory())) {
        path += "/" + param.getSubDirectory();
        isSubDirPath = true;
        _log.info("Sub-directory path {}", path);
    }
    FileSMBShare smbShare = new FileSMBShare(param.getShareName(), param.getDescription(), param.getPermissionType(), param.getPermission(), param.getMaxUsers(), null, path);
    smbShare.setStoragePortName(sport.getPortName());
    smbShare.setStoragePortNetworkId(sport.getPortNetworkId());
    smbShare.setStoragePortGroup(sport.getPortGroup());
    smbShare.setSubDirPath(isSubDirPath);
    _log.info(String.format("Create snapshot share --- Snap id: %1$s, Share name: %2$s, StoragePort: %3$s, PermissionType: %4$s, " + "Permissions: %5$s, Description: %6$s, maxUsers: %7$s", id, smbShare.getName(), sport.getPortName(), smbShare.getPermissionType(), smbShare.getPermission(), smbShare.getDescription(), smbShare.getMaxUsers()));
    _log.info("SMB share path: {}", smbShare.getPath());
    Operation op = _dbClient.createTaskOpStatus(Snapshot.class, snap.getId(), task, ResourceOperationTypeEnum.CREATE_FILE_SNAPSHOT_SHARE);
    FileServiceApi fileServiceApi = FileService.getFileShareServiceImpl(fs, _dbClient);
    fileServiceApi.share(device.getId(), snap.getId(), smbShare, task);
    auditOp(OperationTypeEnum.CREATE_FILE_SNAPSHOT_SHARE, true, AuditLogManager.AUDITOP_BEGIN, smbShare.getName(), smbShare.getPermissionType(), smbShare.getPermission(), smbShare.getMaxUsers(), smbShare.getDescription(), snap.getId().toString());
    return toTask(snap, task, op);
}
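
A companion sketch for the snapshot share endpoint follows, with the same caveats as the export client above; the JSON field names in the payload are assumptions.

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

// Hedged companion to the export client sketch above. Base URL, token, and
// JSON field names are illustrative assumptions, not the exact ViPR schema.
public class SnapshotShareClientSketch {
    public static void main(String[] args) {
        String snapId = "urn:storageos:Snapshot:example"; // hypothetical id
        String body = "{\"name\": \"snapshare1\", \"permission\": \"read\", "
                + "\"max_users\": \"unlimited\"}"; // field names are assumptions
        Client client = ClientBuilder.newClient();
        Response response = client.target("https://vipr.example.com:4443")
                .path("/file/snapshots/" + snapId + "/shares")
                .request(MediaType.APPLICATION_JSON)
                .header("X-SDS-AUTH-TOKEN", "<token>")
                .post(Entity.entity(body, MediaType.APPLICATION_JSON));
        System.out.println(response.getStatus());
        client.close();
    }
}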
Use of com.emc.storageos.db.client.model.StoragePort in project coprhd-controller by CoprHD.
The class NetworkSystemService, method deleteNetworkSystem.
/**
 * Delete a network system. The method will delete the
 * network system and all resources associated with it.
 *
 * @prereq The network system must be unregistered
 * @brief Delete network system
 * @return An asynchronous task.
 *
 * @throws DatabaseException
 *             When an error occurs querying the database.
 */
@POST
@Path("/{id}/deactivate")
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@CheckPermission(roles = { Role.SYSTEM_ADMIN, Role.RESTRICTED_SYSTEM_ADMIN })
public TaskResourceRep deleteNetworkSystem(@PathParam("id") URI id) throws DatabaseException {
NetworkSystem system = queryObject(NetworkSystem.class, id, true);
ArgValidator.checkEntity(system, id, isIdEmbeddedInURL(id));
if (!RegistrationStatus.UNREGISTERED.toString().equals(system.getRegistrationStatus())) {
throw APIException.badRequests.invalidParameterCannotDeactivateRegisteredNetworkSystem(system.getId());
}
if (DiscoveredDataObject.DataCollectionJobStatus.IN_PROGRESS.toString().equals(system.getDiscoveryStatus()) || DiscoveredDataObject.DataCollectionJobStatus.SCHEDULED.toString().equals(system.getDiscoveryStatus())) {
throw APIException.serviceUnavailable.cannotDeactivateStorageSystemWhileInDiscover(system.getId());
}
List<Network> networkList = CustomQueryUtility.queryActiveResourcesByConstraint(_dbClient, Network.class, AlternateIdConstraint.Factory.getConstraint(Network.class, "networkSystems", system.getId().toString()));
for (Network network : networkList) {
if (network != null && network.getInactive() != true && network.getConnectedVirtualArrays() != null && !network.getConnectedVirtualArrays().isEmpty() && (network.getNetworkSystems() != null && network.getNetworkSystems().contains(system.getId().toString()) && network.getNetworkSystems().size() == 1)) {
throw APIException.badRequests.invalidParameterNetworkMustBeUnassignedFromVirtualArray(network.getLabel(), system.getLabel());
}
}
Map<String, List<FCZoneReference>> zonesMap = getNetworkSystemZoneRefs(system);
List<URI> nsystems = null;
List<FCZoneReference> zones = null;
// by the purge process
for (Network network : networkList) {
// remove references from ports
nsystems = StringSetUtil.stringSetToUriList(network.getNetworkSystems());
nsystems.remove(system.getId());
if (nsystems.isEmpty()) {
// This network will be removed - Remove any storage port references
List<StoragePort> netPorts = NetworkAssociationHelper.getNetworkStoragePorts(network.getId().toString(), null, _dbClient);
NetworkAssociationHelper.clearPortAssociations(netPorts, _dbClient);
} else {
// This network will remain, update any zone references to use another network system
URI nsUri = nsystems.get(0);
zones = zonesMap.get(network.getNativeId());
if (zones != null) {
for (FCZoneReference zone : zones) {
zone.setNetworkSystemUri(nsUri);
}
_dbClient.updateObject(zones);
}
}
}
String taskId = UUID.randomUUID().toString();
Operation op = _dbClient.createTaskOpStatus(NetworkSystem.class, system.getId(), taskId, ResourceOperationTypeEnum.DELETE_NETWORK_SYSTEM);
PurgeRunnable.executePurging(_dbClient, _dbPurger, _asynchJobService.getExecutorService(), system, _retry_attempts, taskId, 60);
auditOp(OperationTypeEnum.DELETE_NETWORK_SYSTEM, true, AuditLogManager.AUDITOP_BEGIN, system.getId().toString(), system.getLabel(), system.getPortNumber(), system.getUsername(), system.getSmisProviderIP(), system.getSmisPortNumber(), system.getSmisUserName(), system.getSmisUseSSL(), system.getVersion(), system.getUptime());
return toTask(system, taskId, op);
}
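
The branch worth noting above is the re-homing of FCZoneReferences when a network outlives the deleted network system: the deleted system is dropped from the network's list, and the first surviving system takes over the zone references. A minimal sketch of that selection, with plain strings standing in for URIs (names hypothetical):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Minimal sketch of the fallback selection performed in deleteNetworkSystem:
// drop the system being deleted and, if any system remains, re-home zone
// references to the first survivor; otherwise the network is purged.
public class ZoneRefReassignSketch {
    public static void main(String[] args) {
        List<String> networkSystems = new ArrayList<>(Arrays.asList("ns-1", "ns-2"));
        String deleted = "ns-1";
        networkSystems.remove(deleted);
        if (networkSystems.isEmpty()) {
            System.out.println("network will be purged; clear port associations");
        } else {
            String fallback = networkSystems.get(0);
            System.out.println("re-home FCZoneReferences to " + fallback);
        }
    }
}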
Use of com.emc.storageos.db.client.model.StoragePort in project coprhd-controller by CoprHD.
The class BlockRecoverPointIngestOrchestrator, method performRPExportIngestion.
/**
 * RecoverPoint volumes are expected to have export masks where the volume is exported to
 * a RecoverPoint site. Therefore every RP volume (sources, targets, journals) will need to
 * go through this code and have its export mask ingested. Even if the mask has already been
 * ingested by a previous volume ingestion, this method still needs to update the ExportGroup and
 * ExportMask objects to reflect the newly ingested volume as part of its management.
 *
 * @param parentRequestContext the IngestionRequestContext for the overall ingestion request
 * @param volumeContext the RecoverPointVolumeIngestionContext for the volume currently being ingested
 * @param unManagedVolume the unmanaged volume being ingested
 * @param volume the managed volume
 */
private void performRPExportIngestion(IngestionRequestContext parentRequestContext, RecoverPointVolumeIngestionContext volumeContext, UnManagedVolume unManagedVolume, Volume volume) {
    _logger.info("starting RecoverPoint export ingestion for volume {}", volume.forDisplay());
    Project project = volumeContext.getProject();
    ProtectionSystem protectionSystem = _dbClient.queryObject(ProtectionSystem.class, volume.getProtectionController());
    StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, volume.getStorageController());
    List<UnManagedExportMask> unManagedRPExportMasks = findUnManagedRPExportMask(protectionSystem, unManagedVolume);
    if (unManagedRPExportMasks.isEmpty()) {
        _logger.error("Could not find any unmanaged export masks associated with volume: " + unManagedVolume.getLabel());
        throw IngestionException.exceptions.noUnManagedExportMaskFound(unManagedVolume.getNativeGuid());
    }
    // Keep a map of internal site name to varray
    Map<String, VirtualArray> internalSiteToVarrayMap = new HashMap<String, VirtualArray>();
    internalSiteToVarrayMap.put(volume.getInternalSiteName(), volumeContext.getVarray(unManagedVolume));
    // If this is a MetroPoint volume we're going to have multiple ExportMasks/ExportGroups to deal with.
    // We'll need to query the backend volumes for extra info to populate internalSiteToVarrayMap so
    // we can properly line up the ExportMasks/ExportGroups.
    boolean metropoint = RPHelper.isMetroPointVolume(_dbClient, volume);
    if (metropoint) {
        // We need the VPLEX ingest context to get the backend volume info
        VplexVolumeIngestionContext vplexVolumeContext = ((RpVplexVolumeIngestionContext) volumeContext.getVolumeContext()).getVplexVolumeIngestionContext();
        for (String associatedVolumeIdStr : vplexVolumeContext.getAssociatedVolumeIds(volume)) {
            // Find the associated volumes using the context maps or the db if they are already there
            Volume associatedVolume = VolumeIngestionUtil.findVolume(_dbClient, vplexVolumeContext.getBlockObjectsToBeCreatedMap(), vplexVolumeContext.getDataObjectsToBeUpdatedMap(), associatedVolumeIdStr);
            String internalSiteName = associatedVolume.getInternalSiteName();
            // If we don't already have an entry for this internal site name, add it now.
            if (!internalSiteToVarrayMap.containsKey(internalSiteName)) {
                internalSiteToVarrayMap.put(internalSiteName, _dbClient.queryObject(VirtualArray.class, associatedVolume.getVirtualArray()));
            }
        }
    }
    // For non-MetroPoint volumes this map has a single entry, so this will likely only loop once.
    for (Entry<String, VirtualArray> entry : internalSiteToVarrayMap.entrySet()) {
        String internalSiteName = entry.getKey();
        VirtualArray virtualArray = entry.getValue();
        UnManagedExportMask em = null;
        if (metropoint) {
            // Since we're flagged for MetroPoint we need to determine which ExportMask to use.
            // We need the MetroPoint volume to be added to BOTH ExportGroups that represent the
            // two Storage Views on VPLEX for cluster-1 and cluster-2.
            // So let's use the varray to find the cluster we're looking for on this pass and match
            // it to the maskingViewPath of the UnManagedExportMask.
            // This should line things up roughly as:
            // VPLEX Storage View 1 -> VPLEX Cluster1 + RPA1
            // VPLEX Storage View 2 -> VPLEX Cluster2 + RPA2
            String vplexCluster = ConnectivityUtil.getVplexClusterForVarray(virtualArray.getId(), storageSystem.getId(), _dbClient);
            // First try to match based on UnManagedExportMask ports
            for (UnManagedExportMask exportMask : unManagedRPExportMasks) {
                for (String portUri : exportMask.getKnownStoragePortUris()) {
                    StoragePort port = _dbClient.queryObject(StoragePort.class, URI.create(portUri));
                    if (port != null && !port.getInactive()) {
                        String vplexClusterForMask = ConnectivityUtil.getVplexClusterOfPort(port);
                        if (vplexCluster.equals(vplexClusterForMask)) {
                            em = exportMask;
                            break;
                        }
                    }
                }
                if (em != null) {
                    break;
                }
            }
            if (em == null) {
                // It really shouldn't come to this, but leaving this code just in case.
                for (UnManagedExportMask exportMask : unManagedRPExportMasks) {
                    if (exportMask.getMaskingViewPath().contains("cluster-" + vplexCluster)) {
                        em = exportMask;
                        break;
                    }
                }
            }
        } else {
            em = unManagedRPExportMasks.get(0);
        }
        // If the ingested volume's mask name contains the JOURNAL keyword, make sure the
        // ExportGroup created carries the corresponding internal flag.
        boolean isJournalExport = false;
        if (em.getMaskName().toLowerCase().contains(VolumeIngestionUtil.RP_JOURNAL)) {
            isJournalExport = true;
        }
        String exportGroupGeneratedName = RPHelper.generateExportGroupName(protectionSystem, storageSystem, internalSiteName, virtualArray, isJournalExport);
        ExportGroup exportGroup = VolumeIngestionUtil.verifyExportGroupExists(parentRequestContext, exportGroupGeneratedName, project.getId(), em.getKnownInitiatorUris(), virtualArray.getId(), _dbClient);
        boolean exportGroupCreated = false;
        if (null == exportGroup) {
            exportGroupCreated = true;
            Integer numPaths = em.getZoningMap().size();
            _logger.info("Creating Export Group with label {}", em.getMaskName());
            exportGroup = RPHelper.createRPExportGroup(exportGroupGeneratedName, virtualArray, project, numPaths, isJournalExport);
        }
        if (null != exportGroup) {
            // check if the ExportGroup has already been fetched
            ExportGroup loadedExportGroup = parentRequestContext.findExportGroup(exportGroup.getLabel(), project.getId(), virtualArray.getId(), null, null);
            if (null != loadedExportGroup) {
                exportGroup = loadedExportGroup;
            }
        }
        volumeContext.setExportGroup(exportGroup);
        volumeContext.setExportGroupCreated(exportGroupCreated);
        volumeContext.getRpExportGroupMap().put(exportGroup, exportGroupCreated);
        // set RP device initiators to be used as the "host" for export mask ingestion
        List<Initiator> initiators = new ArrayList<Initiator>();
        Iterator<Initiator> initiatorItr = _dbClient.queryIterativeObjects(Initiator.class, URIUtil.toURIList(em.getKnownInitiatorUris()));
        while (initiatorItr.hasNext()) {
            initiators.add(initiatorItr.next());
        }
        volumeContext.setDeviceInitiators(initiators);
        // find the ingest export strategy and call into it for this unmanaged export mask
        IngestExportStrategy ingestStrategy = ingestStrategyFactory.buildIngestExportStrategy(unManagedVolume);
        volume = ingestStrategy.ingestExportMasks(unManagedVolume, volume, volumeContext);
        if (null == volume) {
            // ingestion did not succeed; an exception should already have been thrown,
            // but throw one here in case it wasn't
            throw IngestionException.exceptions.generalVolumeException(unManagedVolume.getLabel(), "check the logs for more details");
        }
    }
}
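
The MetroPoint branch above is a first-match search: take the first unmanaged export mask that has at least one active port on the target VPLEX cluster, falling back to a masking-view-path match. A standalone sketch with simple types; the portToCluster map is a hypothetical stand-in for the lookup done via ConnectivityUtil.getVplexClusterOfPort:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Standalone sketch of the MetroPoint mask selection above: pick the first
// mask with at least one port on the desired VPLEX cluster. All names here
// are illustrative, not CoprHD types.
public class MaskSelectionSketch {

    static class Mask {
        final String name;
        final List<String> ports;

        Mask(String name, List<String> ports) {
            this.name = name;
            this.ports = ports;
        }
    }

    static Mask selectMaskForCluster(List<Mask> masks, Map<String, String> portToCluster, String vplexCluster) {
        for (Mask mask : masks) {
            for (String port : mask.ports) {
                if (vplexCluster.equals(portToCluster.get(port))) {
                    return mask; // first match wins, as in the orchestrator
                }
            }
        }
        return null; // caller falls back to matching on the masking view path
    }

    public static void main(String[] args) {
        Map<String, String> portToCluster = new HashMap<String, String>();
        portToCluster.put("p1", "1");
        portToCluster.put("p2", "1");
        portToCluster.put("p3", "2");
        List<Mask> masks = Arrays.asList(
                new Mask("view-1", Arrays.asList("p1", "p2")),
                new Mask("view-2", Arrays.asList("p3")));
        System.out.println(selectMaskForCluster(masks, portToCluster, "2").name); // prints view-2
    }
}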