use of com.emc.storageos.volumecontroller.impl.block.VplexBackEndMaskingOrchestrator in project coprhd-controller by CoprHD.
the class VPlexBackendManager method verifyExportMaskOnSystem.
/**
* Verify that an ExportMask that is going to be used is on the StorageSystem.
*
* @param mask ExportMask expected to be present on the StorageSystem
* @param array backend StorageSystem to check
*/
private void verifyExportMaskOnSystem(ExportMask mask, StorageSystem array) {
VplexBackEndMaskingOrchestrator maskingOrchestrator = getOrch(array);
BlockStorageDevice storageDevice = _blockDeviceController.getDevice(array.getSystemType());
// Make a list of Initiators used by the ExportMask. These could be initiators
// explicitly named in the Export Mask, or Initiators that match addresses in the existingInitiators
// fields. (The latter occurs for externally created ExportMasks.)
List<Initiator> initiators = new ArrayList<Initiator>();
initiators.addAll(ExportMaskUtils.getInitiatorsForExportMask(_dbClient, mask, Transport.FC));
if (initiators.isEmpty()) {
initiators.addAll(ExportMaskUtils.getExistingInitiatorsForExportMask(_dbClient, mask, Transport.FC));
}
Map<URI, ExportMask> maskSet = maskingOrchestrator.readExistingExportMasks(array, storageDevice, initiators);
if (maskSet.containsKey(mask.getId())) {
_log.info(String.format("Verified ExportMask %s present on %s", mask.getMaskName(), array.getNativeGuid()));
return;
}
}
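A minimal usage sketch (not part of the CoprHD source): verifyExportMaskOnSystem is private, so it would be called from within VPlexBackendManager before reusing a previously matched mask. The URI variables below are hypothetical; _dbClient is the same DbClient field used above.
ExportMask reusedMask = _dbClient.queryObject(ExportMask.class, someMaskURI);
StorageSystem backendArray = _dbClient.queryObject(StorageSystem.class, someArrayURI);
// Re-reads the masks from the backend array for the mask's FC initiators and logs whether the mask is still present.
verifyExportMaskOnSystem(reusedMask, backendArray);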
use of com.emc.storageos.volumecontroller.impl.block.VplexBackEndMaskingOrchestrator in project coprhd-controller by CoprHD.
the class VPlexBackendManager method addWorkflowStepsToRemoveBackendVolumes.
/**
* Remove a list of volumes from the ExportGroup specified.
*
* @param workflow
* - Workflow that the steps are to be added to
* @param waitFor
* - Wait for completion of this workflow step
* @param storage
* - Storage System
* @param exportGroupURI
* - Export Group to be processed
* @param blockObjectList
* - list of volume or snapshot URIs
* @return true if any steps added to Workflow
* @throws DeviceControllerException
*/
public boolean addWorkflowStepsToRemoveBackendVolumes(Workflow workflow, String waitFor, StorageSystem storage, URI exportGroupURI, List<URI> blockObjectList) throws DeviceControllerException {
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
boolean stepsAdded = false;
// Read all the ExportMasks
Map<String, ExportMask> exportMasks = new HashMap<String, ExportMask>();
Map<String, List<URI>> maskToVolumes = new HashMap<String, List<URI>>();
List<ExportMask> egExportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup);
for (ExportMask mask : egExportMasks) {
if (mask == null || mask.getInactive()) {
continue;
}
exportMasks.put(mask.getId().toString(), mask);
maskToVolumes.put(mask.getId().toString(), new ArrayList<URI>());
}
// Put this information in the maskToVolumes map.
for (URI blockObjectURI : blockObjectList) {
for (ExportMask mask : exportMasks.values()) {
if (mask.hasVolume(blockObjectURI)) {
maskToVolumes.get(mask.getId().toString()).add(blockObjectURI);
} else {
_log.info(String.format("ExportMask %s (%s) does not contain volume %s", mask.getMaskName(), mask.getId(), blockObjectURI));
}
}
}
// Now process each Export Mask.
// refresh export masks per XIO storage array
boolean needRefresh = storage.deviceIsType(Type.xtremio);
String previousStepId = waitFor;
for (ExportMask mask : exportMasks.values()) {
List<URI> volumes = maskToVolumes.get(mask.getId().toString());
if (volumes.isEmpty()) {
_log.info("No volumes to remove for Export Mask: " + mask.getId());
continue;
}
previousStepId = waitFor;
// Verify the ExportMask is present on the system, or check if it was renamed
verifyExportMaskOnSystem(mask, storage);
if (mask.getCreatedBySystem()) {
_log.info(String.format("Generating unzoning step for ExportMask %s", mask.getMaskName()));
// Since this mask was created by the system, we want to unzone it.
List<URI> maskURIs = Collections.singletonList(mask.getId());
List<NetworkZoningParam> zoningParams = NetworkZoningParam.convertExportMasksToNetworkZoningParam(exportGroup.getId(), maskURIs, _dbClient);
Workflow.Method zoneRemoveMethod = _networkDeviceController.zoneExportRemoveVolumesMethod(zoningParams, volumes);
previousStepId = workflow.createStep(ZONING_STEP, String.format("Removing zones for ExportMask %s", mask.getMaskName()), previousStepId, nullURI, "network-system", _networkDeviceController.getClass(), zoneRemoveMethod, zoneRemoveMethod, null);
} else {
_log.info(String.format("ExportMask %s not created by ViPR; no unzoning step", mask.getMaskName()));
}
VplexBackEndMaskingOrchestrator orca = getOrch(storage);
List<URI> initiatorURIs = new ArrayList<>();
if (mask.getInitiators() != null) {
initiatorURIs = new ArrayList<URI>(Collections2.transform(mask.getInitiators(), CommonTransformerFunctions.FCTN_STRING_TO_URI));
}
if (needRefresh) {
BlockStorageDevice device = _blockDeviceController.getDevice(storage.getSystemType());
device.refreshExportMask(storage, mask);
needRefresh = false;
}
Workflow.Method removeVolumesMethod = orca.deleteOrRemoveVolumesFromExportMaskMethod(storage.getId(), exportGroup.getId(), mask.getId(), volumes, initiatorURIs);
String stepId = workflow.createStepId();
workflow.createStep(EXPORT_STEP, String.format("Removing volume from ExportMask %s", mask.getMaskName()), previousStepId, storage.getId(), storage.getSystemType(), orca.getClass(), removeVolumesMethod, removeVolumesMethod, stepId);
_log.info(String.format("Generated remove volume from ExportMask %s for volumes %s", mask.getMaskName(), volumes));
stepsAdded = true;
}
return stepsAdded;
}
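A hedged caller sketch (not CoprHD code): the helper name, and the convention of waiting on the EXPORT_STEP step group when steps were added, are assumptions about how the surrounding workflow is wired.
private String scheduleBackendVolumeRemoval(Workflow workflow, String waitFor, StorageSystem backendArray, URI exportGroupURI, List<URI> volumeURIs) {
// Adds unzoning/unmasking steps for every ExportMask in the ExportGroup that contains any of the given volumes.
boolean stepsAdded = addWorkflowStepsToRemoveBackendVolumes(workflow, waitFor, backendArray, exportGroupURI, volumeURIs);
// If nothing was added, later steps should keep waiting on the original dependency.
return stepsAdded ? EXPORT_STEP : waitFor;
}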
use of com.emc.storageos.volumecontroller.impl.block.VplexBackEndMaskingOrchestrator in project coprhd-controller by CoprHD.
the class VPlexBackendManager method addWorkflowStepsToAddBackendVolumes.
/**
* Add steps to generate the Workflow to add a volume to the VPLEX backend.
* The VNX is special: zoning is done after masking.
* For all other arrays, zoning is done first, then masking.
*
* @param workflow Workflow the steps are to be added to
* @param dependantStepId step id (or step group) the new steps should wait on
* @param exportGroup ExportGroup being updated
* @param exportMask backend ExportMask the volumes are added to
* @param volumeMap Map of Volume URI to Volume object
* @param varrayURI Virtual array URI
* @param vplex VPLEX StorageSystem
* @param array backend array StorageSystem
* @param forgetRollbackStepId step id passed to the ExportMaskAddVolumeCompleter for rollback coordination
* @return String stepId of last added step
*/
public String addWorkflowStepsToAddBackendVolumes(Workflow workflow, String dependantStepId, ExportGroup exportGroup, ExportMask exportMask, Map<URI, Volume> volumeMap, URI varrayURI, StorageSystem vplex, StorageSystem array, String forgetRollbackStepId) {
// Determine if VNX or OpenStack so we can order VNX zoning after masking
boolean isMaskingFirst = isMaskingFirst(array);
boolean isOpenStack = isOpenStack(array);
Map<URI, Integer> volumeLunIdMap = createVolumeMap(array.getId(), volumeMap);
String zoningStep = null;
String maskStepId = workflow.createStepId();
String reValidateExportMaskStep = workflow.createStepId();
ExportMaskAddVolumeCompleter createCompleter = new ExportMaskAddVolumeCompleter(exportGroup.getId(), exportMask.getId(), volumeLunIdMap, maskStepId, forgetRollbackStepId);
List<URI> volumeList = new ArrayList<>();
volumeList.addAll(volumeLunIdMap.keySet());
String previousStepId = dependantStepId;
String zoningDependentStep = ((isMaskingFirst && isOpenStack) ? reValidateExportMaskStep : ((isMaskingFirst && !isOpenStack) ? maskStepId : previousStepId));
if (exportMask.getCreatedBySystem()) {
_log.info(String.format("Creating zone references for Backend ExportMask %s", exportMask.getMaskName()));
List<URI> maskURIs = Collections.singletonList(exportMask.getId());
List<NetworkZoningParam> zoningParams = NetworkZoningParam.convertExportMasksToNetworkZoningParam(exportGroup.getId(), maskURIs, _dbClient);
HashSet<URI> volumes = new HashSet<URI>(volumeLunIdMap.keySet());
Workflow.Method zoneCreateMethod = _networkDeviceController.zoneExportAddVolumesMethod(exportGroup.getId(), maskURIs, volumes);
Workflow.Method zoneDeleteMethod = _networkDeviceController.zoneExportRemoveVolumesMethod(zoningParams, volumes);
zoningStep = workflow.createStep(ZONING_STEP, String.format("Adding zones for ExportMask %s", exportMask.getMaskName()), zoningDependentStep, nullURI, "network-system", _networkDeviceController.getClass(), zoneCreateMethod, zoneDeleteMethod, null);
if (!isMaskingFirst) {
previousStepId = zoningStep;
}
}
// Initiators that are sent down for export validation are the known initiators.
// For back-end VPLEX masks, I find that userAddedInitiators are not getting filled-in,
// so we're playing it safe by using known initiators.
List<URI> initiatorURIs = new ArrayList<>();
if (exportMask.getInitiators() != null) {
initiatorURIs = new ArrayList<URI>(Collections2.transform(exportMask.getInitiators(), CommonTransformerFunctions.FCTN_STRING_TO_URI));
}
VplexBackEndMaskingOrchestrator orca = getOrch(array);
Workflow.Method updateMaskMethod = orca.createOrAddVolumesToExportMaskMethod(array.getId(), exportGroup.getId(), exportMask.getId(), volumeLunIdMap, initiatorURIs, createCompleter);
Workflow.Method rollbackMaskMethod = orca.deleteOrRemoveVolumesFromExportMaskMethod(array.getId(), exportGroup.getId(), exportMask.getId(), volumeList, initiatorURIs);
workflow.createStep(EXPORT_STEP, "createOrAddVolumesToExportMask: " + exportMask.getMaskName(), previousStepId, array.getId(), array.getSystemType(), orca.getClass(), updateMaskMethod, rollbackMaskMethod, maskStepId);
// This is required as the export mask gets updated by reading the cinder response.
if (isOpenStack) {
// START - updateZoningMapAndValidateExportMask Step
Workflow.Method updatezoningAndvalidateMaskMethod = ((VplexCinderMaskingOrchestrator) orca).updateZoningMapAndValidateExportMaskMethod(varrayURI, _initiatorPortMap, exportMask.getId(), _directorToInitiatorIds, _idToInitiatorMap, _portWwnToClusterMap, vplex, array, _cluster);
workflow.createStep(REVALIDATE_MASK, "updatezoningAndrevalidateExportMask: " + exportMask.getMaskName(), maskStepId, array.getId(), array.getSystemType(), orca.getClass(), updatezoningAndvalidateMaskMethod, rollbackMaskMethod, reValidateExportMaskStep);
// END - updateZoningMapAndValidateExportMask Step
}
_log.info(String.format("VPLEX ExportGroup %s (%s) vplex %s varray %s", exportGroup.getLabel(), exportGroup.getId(), vplex.getId(), exportGroup.getVirtualArray()));
return (isMaskingFirst && zoningStep != null) ? zoningStep : maskStepId;
}
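The nested ternary that computes zoningDependentStep is dense; an equivalent unrolled form (a readability sketch only, not a change to the source) is:
String zoningDependentStep;
if (isMaskingFirst && isOpenStack) {
// OpenStack/Cinder: masking runs first and the mask is revalidated, so zoning waits on the revalidation step.
zoningDependentStep = reValidateExportMaskStep;
} else if (isMaskingFirst) {
// Other masking-first arrays (e.g. VNX): zoning waits directly on the masking step.
zoningDependentStep = maskStepId;
} else {
// Default ordering: zoning runs first and only waits on the incoming dependency.
zoningDependentStep = previousStepId;
}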
use of com.emc.storageos.volumecontroller.impl.block.VplexBackEndMaskingOrchestrator in project coprhd-controller by CoprHD.
the class VPlexBackendManager method chooseBackendExportMask.
/**
* Choose one of the existing Export Masks (on VMAX: masking views) if possible in
* which to place the volume to be exported to the VPlex. Otherwise ExportMask(s)
* will be generated and one will be chosen from the generated set.
*
* @param vplex
* [IN] - VPlex storage system
* @param array
* [IN] - Storage Array storage system
* @param varrayURI
* [IN] - Virtual array
* @param volumeMap
* [IN] - Map of volume URIs to their corresponding Volume objects
* @param stepId
* the workflow step id used to find the workflow where the existing zone information is stored
* @return ExportMaskPlacementDescriptor - data structure that will indicate the mapping of ExportMasks to
* ExportGroups and ExportMasks to Volumes.
* @throws ControllerException
*/
public ExportMaskPlacementDescriptor chooseBackendExportMask(StorageSystem vplex, StorageSystem array, URI varrayURI, Map<URI, Volume> volumeMap, String stepId) throws ControllerException {
_log.info(String.format("Searching for existing ExportMasks between Vplex %s (%s) and Array %s (%s) in Varray %s", vplex.getLabel(), vplex.getNativeGuid(), array.getLabel(), array.getNativeGuid(), varrayURI));
long startTime = System.currentTimeMillis();
// The volumeMap can contain volumes from different arrays. We are interested only in the ones for 'array'.
Map<URI, Volume> volumesForArray = filterVolumeMap(volumeMap, array);
// Build the data structures used for analysis and validation.
buildDataStructures(vplex, array, varrayURI);
VplexBackEndMaskingOrchestrator vplexBackendOrchestrator = getOrch(array);
BlockStorageDevice storageDevice = _blockDeviceController.getDevice(array.getSystemType());
// Lock operation.
String lockName = _vplexApiLockManager.getLockName(vplex.getId(), _cluster, array.getId());
boolean lockAcquired = false;
try {
if (_vplexApiLockManager != null) {
lockAcquired = _vplexApiLockManager.acquireLock(lockName, MAX_LOCK_WAIT_SECONDS);
if (!lockAcquired) {
_log.info("Timed out waiting on lock- PROCEEDING ANYWAY!");
}
}
// Initialize the placement data structure
ExportMaskPlacementDescriptor placementDescriptor = ExportMaskPlacementDescriptor.create(_tenantURI, _projectURI, vplex, array, varrayURI, volumesForArray, _idToInitiatorMap.values());
// VplexBackEndMaskingOrchestrator#suggestExportMasksForPlacement should fill in the rest of the
// placement data structures, such that decisions on how to reuse the ExportMasks can be done here.
// At a minimum, this placement is done by reading the ExportMasks off the backend array based on the initiators.
// Customizations can be done per array based on other factors. Most notably, for VMAX, this would place
// volumes in appropriate ExportMasks based on the volume's AutoTieringPolicy relationship.
vplexBackendOrchestrator.suggestExportMasksForPlacement(array, storageDevice, _initiators, placementDescriptor);
// Apply the filters that will remove any ExportMasks that do not fit the expected VPlex masking paradigm
Set<URI> invalidMasks = filterExportMasksByVPlexRequirements(vplex, array, varrayURI, placementDescriptor);
// If there were any invalid masks found, we can redo the volume placement into
// an alternative ExportMask (if there are any listed by the descriptor)
putUnplacedVolumesIntoAlternativeMask(placementDescriptor);
// If not, we will attempt to generate some.
if (!placementDescriptor.hasMasks()) {
_log.info("There weren't any ExportMasks in the placementDescriptor. Creating new ExportMasks for the volumes.");
// Did not find any reusable ExportMasks. Either there were some that matched the initiators but did not meet
// the VPlex criteria, or there were no existing masks for the backend at all.
Map<URI, Volume> volumesToPlace = placementDescriptor.getVolumesToPlace();
createVPlexBackendExportMasksForVolumes(vplex, array, varrayURI, placementDescriptor, invalidMasks, volumesToPlace, stepId);
} else if (placementDescriptor.hasUnPlacedVolumes()) {
_log.info("There were some reusable ExportMasks found, but not all volumes got placed. Will create an ExportMask to " + "hold these unplaced volumes.");
// There were some matching ExportMasks found on the backend array, but we also have some unplaced volumes.
// We need to create new ExportMasks to hold these unplaced volumes.
// We will leave the placement hint to whatever was determined by the suggestExportMasksForPlacement call.
Map<URI, Volume> unplacedVolumes = placementDescriptor.getUnplacedVolumes();
createVPlexBackendExportMasksForVolumes(vplex, array, varrayURI, placementDescriptor, invalidMasks, unplacedVolumes, stepId);
}
// At this point, we have:
//
// a). Requested that the backend StorageArray provide us with a list of ExportMasks that can support the
// initiators + volumes.
// b). Processed the suggested ExportMasks in case they had their names changed
// c). Filtered out any ExportMasks that do not fit the VPlex masking paradigm
// OR
// d). Created a set of new ExportMasks to support the initiators + volumes
//
// We will now run the final placement based on a strategy determined by looking at the placementDescriptor
VPlexBackendPlacementStrategyFactory.create(_dbClient, placementDescriptor).execute();
long elapsed = System.currentTimeMillis() - startTime;
_log.info(String.format("PlacementDescriptor processing took %f seconds", (double) elapsed / (double) 1000));
_log.info(String.format("PlacementDescriptor was created:%n%s", placementDescriptor.toString()));
return placementDescriptor;
} finally {
if (lockAcquired) {
_vplexApiLockManager.releaseLock(lockName);
}
}
}
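filterVolumeMap is referenced above but not shown in this excerpt; a minimal sketch of what such a helper could look like, assuming Volume#getStorageController() identifies the backing array (an assumption about the data model):
private Map<URI, Volume> filterVolumesForArray(Map<URI, Volume> volumeMap, StorageSystem array) {
Map<URI, Volume> result = new HashMap<URI, Volume>();
for (Map.Entry<URI, Volume> entry : volumeMap.entrySet()) {
// Keep only volumes whose backing storage system matches the given backend array (assumes getStorageController()).
if (array.getId().equals(entry.getValue().getStorageController())) {
result.put(entry.getKey(), entry.getValue());
}
}
return result;
}
The helper name here is hypothetical; the actual method in VPlexBackendManager is filterVolumeMap.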
use of com.emc.storageos.volumecontroller.impl.block.VplexBackEndMaskingOrchestrator in project coprhd-controller by CoprHD.
the class VPlexBackendManager method generateExportMasks.
private Map<ExportMask, ExportGroup> generateExportMasks(URI varrayURI, StorageSystem vplex, StorageSystem array, String stepId, StringBuilder errorMessages) {
// Build the data structures used for analysis and validation.
buildDataStructures(vplex, array, varrayURI);
// Assign initiators to hosts
String clusterName = getClusterName(vplex);
Set<Map<String, Map<URI, Set<Initiator>>>> initiatorGroups = getInitiatorGroups(clusterName, _directorToInitiatorIds, _initiatorIdToNetwork, _idToInitiatorMap, array.getSystemType().equals(SystemType.vnxblock.name()), false);
// First we must determine the Initiator Groups and PortGroups to be used.
VplexBackEndMaskingOrchestrator orca = getOrch(array);
// set VPLEX director count to set number of paths per director
if (orca instanceof VplexXtremIOMaskingOrchestrator) {
// get VPLEX director count
int directorCount = getVplexDirectorCount(initiatorGroups);
((VplexXtremIOMaskingOrchestrator) orca).setVplexDirectorCount(directorCount);
}
// get the allocatable ports - if the custom config requests pre-zoned ports to be used
// get the existing zones in zonesByNetwork
Map<NetworkLite, StringSetMap> zonesByNetwork = new HashMap<NetworkLite, StringSetMap>();
Map<URI, List<StoragePort>> allocatablePorts = getAllocatablePorts(array, _networkMap.keySet(), varrayURI, zonesByNetwork, stepId);
Map<ExportMask, ExportGroup> exportMasksMap = new HashMap<ExportMask, ExportGroup>();
if (allocatablePorts.isEmpty()) {
String message = "No allocatable ports found for export to VPLEX backend. ";
_log.warn(message);
if (errorMessages != null) {
errorMessages.append(message);
}
_log.warn("Returning empty export mask map because no allocatable ports could be found.");
return exportMasksMap;
}
Map<URI, Map<String, Integer>> switchToPortNumber = getSwitchToMaxPortNumberMap(array);
Set<Map<URI, List<List<StoragePort>>>> portGroups = orca.getPortGroups(allocatablePorts, _networkMap, varrayURI, initiatorGroups.size(), switchToPortNumber, null, errorMessages);
// Now generate the Masking Views that will be needed.
Map<URI, String> initiatorSwitchMap = new HashMap<URI, String>();
Map<URI, Map<String, List<StoragePort>>> switchStoragePortsMap = new HashMap<URI, Map<String, List<StoragePort>>>();
Map<URI, List<StoragePort>> storageports = getStoragePorts(portGroups);
Map<URI, String> portSwitchMap = new HashMap<URI, String>();
PlacementUtils.getSwitchNameForInititaorsStoragePorts(_initiators, storageports, _dbClient, array, initiatorSwitchMap, switchStoragePortsMap, portSwitchMap);
Iterator<Map<String, Map<URI, Set<Initiator>>>> igIterator = initiatorGroups.iterator();
// get the assigner needed - either a pre-zoned ports assigner or the default
StoragePortsAssigner assigner = StoragePortsAssignerFactory.getAssignerForZones(array.getSystemType(), zonesByNetwork);
for (Map<URI, List<List<StoragePort>>> portGroup : portGroups) {
String maskName = clusterName.replaceAll("[^A-Za-z0-9_]", "_");
_log.info("Generating ExportMask: " + maskName);
if (!igIterator.hasNext()) {
igIterator = initiatorGroups.iterator();
}
Map<String, Map<URI, Set<Initiator>>> initiatorGroup = igIterator.next();
StringSetMap zoningMap = orca.configureZoning(portGroup, initiatorGroup, _networkMap, assigner, initiatorSwitchMap, switchStoragePortsMap, portSwitchMap);
ExportMask exportMask = generateExportMask(array.getId(), maskName, portGroup, initiatorGroup, zoningMap);
// Set a flag indicating that we do not want to remove zoningMap entries
StringSetMap map = new StringSetMap();
StringSet values = new StringSet();
values.add(Boolean.TRUE.toString());
map.put(ExportMask.DeviceDataMapKeys.ImmutableZoningMap.name(), values);
if (array.getSystemType().equals(SystemType.vmax.name())) {
// If VMAX, set consistentLUNs = false
values = new StringSet();
values.add(Boolean.FALSE.toString());
map.put(ExportMask.DeviceDataMapKeys.VMAXConsistentLUNs.name(), values);
}
exportMask.addDeviceDataMap(map);
// Create an ExportGroup for the ExportMask.
List<Initiator> initiators = new ArrayList<Initiator>();
for (String director : initiatorGroup.keySet()) {
for (URI networkURI : initiatorGroup.get(director).keySet()) {
for (Initiator initiator : initiatorGroup.get(director).get(networkURI)) {
initiators.add(initiator);
}
}
}
_dbClient.createObject(exportMask);
ExportGroup exportGroup = ExportUtils.createVplexExportGroup(_dbClient, vplex, array, initiators, varrayURI, _projectURI, _tenantURI, 0, exportMask);
exportMasksMap.put(exportMask, exportGroup);
}
return exportMasksMap;
}
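The triple-nested loop above that flattens initiatorGroup into a flat initiator list could equivalently be written with Java 8 streams (a readability sketch only; it assumes java.util.stream.Collectors is imported):
List<Initiator> initiators = initiatorGroup.values().stream()
.flatMap(byNetwork -> byNetwork.values().stream())
.flatMap(Set::stream)
.collect(Collectors.toList());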