use of com.emc.storageos.db.client.model.VirtualArray in project coprhd-controller by CoprHD.
the class RPDeviceController method exportOrchestrationSteps.
/**
* @param volumeDescriptors
* - Volume descriptors
* @param rpSystemId
* - RP system
* @param taskId
* - task ID
* @return - True on success, false otherwise
* @throws InternalException
*/
public boolean exportOrchestrationSteps(List<VolumeDescriptor> volumeDescriptors, URI rpSystemId, String taskId) throws InternalException {
List<URI> volUris = VolumeDescriptor.getVolumeURIs(volumeDescriptors);
RPCGExportOrchestrationCompleter completer = new RPCGExportOrchestrationCompleter(volUris, taskId);
Workflow workflow = null;
boolean lockException = false;
Map<URI, Set<URI>> exportGroupVolumesAdded = new HashMap<URI, Set<URI>>();
exportGroupsCreated = new ArrayList<URI>();
final String COMPUTE_RESOURCE_CLUSTER = "cluster";
try {
final String workflowKey = "rpExportOrchestration";
if (!WorkflowService.getInstance().hasWorkflowBeenCreated(taskId, workflowKey)) {
// Generate the Workflow.
workflow = _workflowService.getNewWorkflow(this, EXPORT_ORCHESTRATOR_WF_NAME, true, taskId);
// The waitFor key returned by the previous call
String waitFor = null;
ProtectionSystem rpSystem = _dbClient.queryObject(ProtectionSystem.class, rpSystemId);
// Get the CG Params based on the volume descriptors
CGRequestParams params = this.getCGRequestParams(volumeDescriptors, rpSystem);
updateCGParams(params);
_log.info("Start adding RP Export Volumes steps....");
// Get the RP Exports from the CGRequestParams object
Collection<RPExport> rpExports = generateStorageSystemExportMaps(params, volumeDescriptors);
Map<String, Set<URI>> rpSiteInitiatorsMap = getRPSiteInitiators(rpSystem, rpExports);
// Acquire all the RP lock keys needed for export before we start assembling the export groups.
acquireRPLockKeysForExport(taskId, rpExports, rpSiteInitiatorsMap);
// For each RP export, reuse the existing export group if one is found, or create a new one.
for (RPExport rpExport : rpExports) {
URI storageSystemURI = rpExport.getStorageSystem();
String internalSiteName = rpExport.getRpSite();
URI varrayURI = rpExport.getVarray();
List<URI> volumes = rpExport.getVolumes();
List<URI> initiatorSet = new ArrayList<URI>();
String rpSiteName = (rpSystem.getRpSiteNames() != null) ? rpSystem.getRpSiteNames().get(internalSiteName) : internalSiteName;
StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, storageSystemURI);
VirtualArray varray = _dbClient.queryObject(VirtualArray.class, varrayURI);
_log.info("--------------------");
_log.info(String.format("RP Export: StorageSystem = [%s] RPSite = [%s] VirtualArray = [%s]", storageSystem.getLabel(), rpSiteName, varray.getLabel()));
boolean isJournalExport = rpExport.getIsJournalExport();
String exportGroupGeneratedName = RPHelper.generateExportGroupName(rpSystem, storageSystem, internalSiteName, varray, isJournalExport);
// Set up the export group - we may or may not need to create it, but we need to have everything ready in case we do
ExportGroup exportGroup = RPHelper.createRPExportGroup(exportGroupGeneratedName, varray, _dbClient.queryObject(Project.class, params.getProject()), 0, isJournalExport);
// Get the initiators of the RP Cluster (all of the RPAs on one side of a configuration)
Map<String, Map<String, String>> rpaWWNs = RPHelper.getRecoverPointClient(rpSystem).getInitiatorWWNs(internalSiteName);
if (rpaWWNs == null || rpaWWNs.isEmpty()) {
throw DeviceControllerExceptions.recoverpoint.noInitiatorsFoundOnRPAs();
}
// Convert to initiator object
List<Initiator> initiators = new ArrayList<Initiator>();
for (String rpaId : rpaWWNs.keySet()) {
for (Map.Entry<String, String> rpaWWN : rpaWWNs.get(rpaId).entrySet()) {
Initiator initiator = ExportUtils.getInitiator(rpaWWN.getKey(), _dbClient);
initiators.add(initiator);
}
}
// We need to find and distill only those RP initiators that correspond to the network of the
// storage system, where that network has front-end ports from the storage system.
// In certain lab environments, it's quite possible that there are two networks: one for the
// storage system FE ports and one for the BE ports. In such configs, RP initiators will be
// spread across those two networks. The RP controller does not care about storage system
// back-end ports, so we will ignore those initiators that are connected to a network that
// has only storage system back-end port connectivity.
Map<URI, Set<Initiator>> rpNetworkToInitiatorsMap = new HashMap<URI, Set<Initiator>>();
Set<URI> rpSiteInitiatorUris = rpSiteInitiatorsMap.get(internalSiteName);
if (rpSiteInitiatorUris != null) {
for (URI rpSiteInitiatorUri : rpSiteInitiatorUris) {
Initiator rpSiteInitiator = _dbClient.queryObject(Initiator.class, rpSiteInitiatorUri);
URI rpInitiatorNetworkURI = getInitiatorNetwork(exportGroup, rpSiteInitiator);
if (rpInitiatorNetworkURI != null) {
if (rpNetworkToInitiatorsMap.get(rpInitiatorNetworkURI) == null) {
rpNetworkToInitiatorsMap.put(rpInitiatorNetworkURI, new HashSet<Initiator>());
}
rpNetworkToInitiatorsMap.get(rpInitiatorNetworkURI).add(rpSiteInitiator);
_log.info(String.format("RP Initiator [%s] found on network: [%s]", rpSiteInitiator.getInitiatorPort(), rpInitiatorNetworkURI.toASCIIString()));
} else {
_log.info(String.format("RP Initiator [%s] was not found on any network. Excluding from automated exports", rpSiteInitiator.getInitiatorPort()));
}
}
}
// Compute numPaths. This is how it's done:
// We know the RP site and the Network/TransportZone it is on.
// Determine all the storage ports for the storage array for all the networks they are on.
// Next, if we find the network for the RP site in the above list, return all the storage ports
// corresponding to that network.
// For RP we will try to use as many storage ports as possible.
Map<URI, List<StoragePort>> initiatorPortMap = getInitiatorPortsForArray(rpNetworkToInitiatorsMap, storageSystemURI, varrayURI, rpSiteName);
for (URI networkURI : initiatorPortMap.keySet()) {
for (StoragePort storagePort : initiatorPortMap.get(networkURI)) {
_log.info(String.format("Network : [%s] - Port : [%s]", networkURI.toString(), storagePort.getLabel()));
}
}
int numPaths = computeNumPaths(initiatorPortMap, varrayURI, storageSystem);
_log.info("Total paths = " + numPaths);
// Stems from above comment where we distill the RP network and the initiators in that network.
List<Initiator> initiatorList = new ArrayList<Initiator>();
for (URI rpNetworkURI : rpNetworkToInitiatorsMap.keySet()) {
if (initiatorPortMap.containsKey(rpNetworkURI)) {
initiatorList.addAll(rpNetworkToInitiatorsMap.get(rpNetworkURI));
}
}
for (Initiator initiator : initiatorList) {
initiatorSet.add(initiator.getId());
}
// See if the export group already exists
ExportGroup exportGroupInDB = exportGroupExistsInDB(exportGroup);
boolean addExportGroupToDB = false;
if (exportGroupInDB != null) {
exportGroup = exportGroupInDB;
// If the export already exists, check to see if any of the volumes have already been
// exported. No need to re-export volumes.
List<URI> volumesToRemove = new ArrayList<URI>();
for (URI volumeURI : volumes) {
if (exportGroup.getVolumes() != null && !exportGroup.getVolumes().isEmpty() && exportGroup.getVolumes().containsKey(volumeURI.toString())) {
_log.info(String.format("Volume [%s] already exported to export group [%s], " + "it will be not be re-exported", volumeURI.toString(), exportGroup.getGeneratedName()));
volumesToRemove.add(volumeURI);
}
}
// Remove volumes if they have already been exported
if (!volumesToRemove.isEmpty()) {
volumes.removeAll(volumesToRemove);
}
// If no volumes are left to export, nothing else needs to be done here.
if (volumes.isEmpty()) {
_log.info(String.format("No volumes needed to be exported to export group [%s], continue", exportGroup.getGeneratedName()));
continue;
}
} else {
addExportGroupToDB = true;
}
// Add volumes to the export group
Map<URI, Integer> volumesToAdd = new HashMap<URI, Integer>();
for (URI volumeID : volumes) {
exportGroup.addVolume(volumeID, ExportGroup.LUN_UNASSIGNED);
volumesToAdd.put(volumeID, ExportGroup.LUN_UNASSIGNED);
}
// Keep track of volumes added to export group
if (!volumesToAdd.isEmpty()) {
exportGroupVolumesAdded.put(exportGroup.getId(), volumesToAdd.keySet());
}
// Add the compute resource (cluster or host) to the export group, if one was specified for the volume
if (rpExport.getComputeResource() != null) {
URI computeResource = rpExport.getComputeResource();
_log.info(String.format("RP Export: ComputeResource : %s", computeResource.toString()));
if (computeResource.toString().toLowerCase().contains(COMPUTE_RESOURCE_CLUSTER)) {
Cluster cluster = _dbClient.queryObject(Cluster.class, computeResource);
exportGroup.addCluster(cluster);
} else {
Host host = _dbClient.queryObject(Host.class, rpExport.getComputeResource());
exportGroup.addHost(host);
}
}
// Persist the export group
if (addExportGroupToDB) {
exportGroup.addInitiators(initiatorSet);
exportGroup.setNumPaths(numPaths);
_dbClient.createObject(exportGroup);
// Keep track of newly created EGs in case of rollback
exportGroupsCreated.add(exportGroup.getId());
} else {
_dbClient.updateObject(exportGroup);
}
// If the export group already exists, add the volumes to it; otherwise create a brand new export group.
StringBuilder buffer = new StringBuilder();
buffer.append(String.format(DASHED_NEWLINE));
if (!addExportGroupToDB) {
buffer.append(String.format("Adding volumes to existing Export Group for Storage System [%s], RP Site [%s], Virtual Array [%s]%n", storageSystem.getLabel(), rpSiteName, varray.getLabel()));
buffer.append(String.format("Export Group name is : [%s]%n", exportGroup.getGeneratedName()));
buffer.append(String.format("Export Group will have these volumes added: [%s]%n", Joiner.on(',').join(volumes)));
buffer.append(String.format(DASHED_NEWLINE));
_log.info(buffer.toString());
waitFor = _exportWfUtils.generateExportGroupAddVolumes(workflow, STEP_EXPORT_GROUP, waitFor, storageSystemURI, exportGroup.getId(), volumesToAdd);
_log.info("Added Export Group add volumes step in workflow");
} else {
buffer.append(String.format("Creating new Export Group for Storage System [%s], RP Site [%s], Virtual Array [%s]%n", storageSystem.getLabel(), rpSiteName, varray.getLabel()));
buffer.append(String.format("Export Group name is: [%s]%n", exportGroup.getGeneratedName()));
buffer.append(String.format("Export Group will have these initiators: [%s]%n", Joiner.on(',').join(initiatorSet)));
buffer.append(String.format("Export Group will have these volumes added: [%s]%n", Joiner.on(',').join(volumes)));
buffer.append(String.format(DASHED_NEWLINE));
_log.info(buffer.toString());
String exportStep = workflow.createStepId();
initTaskStatus(exportGroup, exportStep, Operation.Status.pending, "create export");
waitFor = _exportWfUtils.generateExportGroupCreateWorkflow(workflow, STEP_EXPORT_GROUP, waitFor, storageSystemURI, exportGroup.getId(), volumesToAdd, initiatorSet);
_log.info("Added Export Group create step in workflow. New Export Group Id: " + exportGroup.getId());
}
}
String successMessage = "Export orchestration completed successfully";
// Finish up and execute the plan.
// The Workflow will handle the TaskCompleter
Object[] callbackArgs = new Object[] { volUris };
workflow.executePlan(completer, successMessage, new WorkflowCallback(), callbackArgs, null, null);
// Mark this workflow as created/executed so we don't do it again on retry/resume
WorkflowService.getInstance().markWorkflowBeenCreated(taskId, workflowKey);
}
} catch (LockRetryException ex) {
/**
* Added this catch block to mark the current workflow as completed so that the lock retry will not get an exception
* while creating a new workflow using the same task ID.
*/
_log.warn(String.format("Lock retry exception key: %s remaining time %d", ex.getLockIdentifier(), ex.getRemainingWaitTimeSeconds()));
if (workflow != null && !NullColumnValueGetter.isNullURI(workflow.getWorkflowURI()) && workflow.getWorkflowState() == WorkflowState.CREATED) {
com.emc.storageos.db.client.model.Workflow wf = _dbClient.queryObject(com.emc.storageos.db.client.model.Workflow.class, workflow.getWorkflowURI());
if (!wf.getCompleted()) {
_log.error("Marking the status to completed for the newly created workflow {}", wf.getId());
wf.setCompleted(true);
_dbClient.updateObject(wf);
}
}
throw ex;
} catch (Exception ex) {
_log.error("Could not create volumes: " + volUris, ex);
// Rollback ViPR level RP export group changes
rpExportGroupRollback();
if (workflow != null) {
_workflowService.releaseAllWorkflowLocks(workflow);
}
String opName = ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME.getName();
ServiceError serviceError = null;
if (lockException) {
serviceError = DeviceControllerException.errors.createVolumesAborted(volUris.toString(), ex);
} else {
serviceError = DeviceControllerException.errors.createVolumesFailed(volUris.toString(), opName, ex);
}
completer.error(_dbClient, _locker, serviceError);
return false;
}
_log.info("End adding RP Export Volumes steps.");
return true;
}
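The rpNetworkToInitiatorsMap population above uses the classic get-then-put grouping idiom. Below is a minimal stand-alone sketch of the same grouping written with Java 8's computeIfAbsent; the initiator WWNs and network URIs are made-up stand-ins, not ViPR objects.
import java.net.URI;
import java.util.*;

public class GroupByNetworkSketch {
    public static void main(String[] args) {
        // Hypothetical input: each initiator port name mapped to the network it was found on
        Map<String, URI> initiatorToNetwork = new HashMap<>();
        initiatorToNetwork.put("50:01:24:80:00:88:01:10", URI.create("urn:network:A"));
        initiatorToNetwork.put("50:01:24:80:00:88:01:11", URI.create("urn:network:A"));
        initiatorToNetwork.put("50:01:24:82:00:9A:02:20", URI.create("urn:network:B"));

        Map<URI, Set<String>> networkToInitiators = new HashMap<>();
        for (Map.Entry<String, URI> e : initiatorToNetwork.entrySet()) {
            // computeIfAbsent replaces the explicit null-check-then-put seen in the method above
            networkToInitiators.computeIfAbsent(e.getValue(), k -> new HashSet<>()).add(e.getKey());
        }
        System.out.println(networkToInitiators);
    }
}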
use of com.emc.storageos.db.client.model.VirtualArray in project coprhd-controller by CoprHD.
the class FileDeviceController method assignFileReplicationPolicyToVirtualPools.
@Override
public void assignFileReplicationPolicyToVirtualPools(URI storageSystemURI, URI targetSystemURI, URI sourceVNasURI, URI targetVArrayURI, URI targetVNasURI, URI filePolicyToAssign, URI vpoolURI, String opId) throws ControllerException {
try {
WorkflowStepCompleter.stepExecuting(opId);
StorageSystem sourceSystem = _dbClient.queryObject(StorageSystem.class, storageSystemURI);
StorageSystem targetSystem = _dbClient.queryObject(StorageSystem.class, targetSystemURI);
FilePolicy filePolicy = _dbClient.queryObject(FilePolicy.class, filePolicyToAssign);
VirtualPool vpool = _dbClient.queryObject(VirtualPool.class, vpoolURI);
VirtualArray targetVarray = _dbClient.queryObject(VirtualArray.class, targetVArrayURI);
VirtualNAS sourceVNAS = null;
VirtualNAS targetVNAS = null;
FileDeviceInputOutput sourceArgs = new FileDeviceInputOutput();
FileDeviceInputOutput targetArgs = new FileDeviceInputOutput();
targetArgs.setVarray(targetVarray);
sourceArgs.setFileProtectionPolicy(filePolicy);
sourceArgs.setVPool(vpool);
if (sourceVNasURI != null) {
sourceVNAS = _dbClient.queryObject(VirtualNAS.class, sourceVNasURI);
sourceArgs.setvNAS(sourceVNAS);
targetArgs.setSourceVNAS(sourceVNAS);
}
targetArgs.setSourceSystem(sourceSystem);
targetArgs.setVPool(vpool);
targetArgs.setTarget(true);
if (targetVNasURI != null) {
targetVNAS = _dbClient.queryObject(VirtualNAS.class, targetVNasURI);
targetArgs.setvNAS(targetVNAS);
}
_log.info("Assigning file replication policy: {} to vpool: {}", filePolicyToAssign, vpoolURI);
BiosCommandResult result = getDevice(sourceSystem.getSystemType()).checkFileReplicationPolicyExistsOrCreate(sourceSystem, targetSystem, sourceArgs, targetArgs);
if (result.getCommandPending()) {
return;
}
if (!result.isCommandSuccess() && !result.getCommandPending()) {
WorkflowStepCompleter.stepFailed(opId, result.getServiceCoded());
}
if (result.isCommandSuccess()) {
WorkflowStepCompleter.stepSucceded(opId);
}
} catch (Exception e) {
ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
WorkflowStepCompleter.stepFailed(opId, serviceError);
}
}
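The completion logic above is a tri-state pattern: a pending result returns and waits for an async callback, a failure fails the step, and success completes it. Here is a minimal sketch of that shape; CommandResult and completeStep are hypothetical stand-ins for BiosCommandResult and WorkflowStepCompleter, not the real ViPR API.
public class StepCompletionSketch {
    enum Status { PENDING, SUCCESS, FAILED }

    static class CommandResult {
        final Status status;
        CommandResult(Status status) { this.status = status; }
    }

    static void completeStep(String opId, CommandResult result) {
        if (result.status == Status.PENDING) {
            // Async job still running; a later callback is expected to complete the step
            return;
        }
        if (result.status == Status.FAILED) {
            System.out.println("step " + opId + " failed");
            return;
        }
        System.out.println("step " + opId + " succeeded");
    }

    public static void main(String[] args) {
        completeStep("op-1", new CommandResult(Status.PENDING));
        completeStep("op-2", new CommandResult(Status.SUCCESS));
    }
}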
use of com.emc.storageos.db.client.model.VirtualArray in project coprhd-controller by CoprHD.
the class AbstractDefaultMaskingOrchestrator method determineInitiatorToExportMaskPlacements.
/**
* Routine will examine the ExportGroup object's ExportMasks and the passed-in
* compute-resource-to-initiators map to produce a mapping of the ExportMasks'
* initiator port names to a list of ExportMask URIs.
*
* @param exportGroup
* [in] - ExportGroup object to examine
* @param storage
* [in] - URI of the storage system whose ExportMasks are considered
* @param computeResourceToInitiators
* [in] - Mapping of compute resource string key to list of Initiator URIs
* @param initiatorToExportMapOnArray
* [in] - Mapping of initiator port name to the ExportMask URIs found on the array
* @param portNameToInitiatorURI
* [in] - Mapping of initiator port name to Initiator URI
* @param partialMasks
* [out] - set of masks that were found to be "partial" masks, where multiple masks make up one
* compute resource
* @return Map of String to set of URIs. The key will be Initiator.normalizePort(initiator.portName);
* the value will be a set of ExportMask URIs.
*/
protected Map<String, Set<URI>> determineInitiatorToExportMaskPlacements(ExportGroup exportGroup, URI storage, Map<String, List<URI>> computeResourceToInitiators, Map<String, Set<URI>> initiatorToExportMapOnArray, Map<String, URI> portNameToInitiatorURI, Set<URI> partialMasks) {
Map<String, Set<URI>> initiatorToExportMaskURIMap = new HashMap<String, Set<URI>>();
Map<String, Set<URI>> computeResourceToExportMaskMap = ExportMaskUtils.mapComputeResourceToExportMask(_dbClient, exportGroup, storage);
Set<URI> allExportMaskURIs = new HashSet<>();
// Map each compute resource's initiators to the ExportMasks associated with the ExportGroup
for (Map.Entry<String, List<URI>> entry : computeResourceToInitiators.entrySet()) {
String computeResource = entry.getKey();
List<URI> initiatorSet = entry.getValue();
if (computeResourceToExportMaskMap.get(computeResource) != null) {
for (URI exportMaskURI : computeResourceToExportMaskMap.get(computeResource)) {
if (exportMaskURI == null) {
_log.info(String.format("determineInitiatorToExportMaskPlacements - No ExportMask for compute resource %s in ExportGroup %s", computeResource, exportGroup.getLabel()));
continue;
}
for (URI initiatorURI : initiatorSet) {
Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
if (initiator == null) {
continue;
}
String normalizedName = Initiator.normalizePort(initiator.getInitiatorPort());
Set<URI> exportMaskURIs = initiatorToExportMaskURIMap.get(normalizedName);
if (exportMaskURIs == null) {
exportMaskURIs = new TreeSet<URI>();
initiatorToExportMaskURIMap.put(normalizedName, exportMaskURIs);
}
exportMaskURIs.add(exportMaskURI);
}
}
}
}
// Collect all ExportMask URIs seen on the array, so we can recognize "partial" masks that,
// when combined, make up a cluster masking view
for (Map.Entry<String, Set<URI>> entry : initiatorToExportMapOnArray.entrySet()) {
allExportMaskURIs.addAll(entry.getValue());
}
Collection<URI> volumes = new HashSet<URI>();
if (exportGroup.getVolumes() != null) {
volumes = Collections2.transform(exportGroup.getVolumes().keySet(), CommonTransformerFunctions.FCTN_STRING_TO_URI);
}
ExportPathParams exportPathParams = _blockScheduler.calculateExportPathParamForVolumes(volumes, 0, storage, exportGroup.getId());
_log.info(String.format("determineInitiatorToExportMaskPlacements - ExportGroup=%s, exportPathParams=%s", exportGroup.getId().toString(), exportPathParams));
URI portGroup = exportPathParams.getPortGroup();
_log.info(String.format("Port group: %s", portGroup));
// Update mapping based on what is seen on the array
for (Map.Entry<String, Set<URI>> entry : initiatorToExportMapOnArray.entrySet()) {
String portName = entry.getKey();
// Validate this initiator and determine if it exists in the database
URI initiatorURI = portNameToInitiatorURI.get(portName);
if (initiatorURI == null) {
URIQueryResultList uris = new URIQueryResultList();
_dbClient.queryByConstraint(AlternateIdConstraint.Factory.getInitiatorPortInitiatorConstraint(portName), uris);
if (!uris.iterator().hasNext()) {
// There is no such initiator
_log.info(String.format("determineInitiatorToExportMaskPlacements - Could not find initiator port %s in DB", portName));
continue;
}
initiatorURI = uris.iterator().next();
}
// We should have a non-null initiator URI at this point
Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
if (initiator == null) {
_log.info(String.format("determineInitiatorToExportMaskPlacements - Initiator %s does not exist in DB", initiatorURI.toString()));
continue;
}
_log.info(String.format("determineInitiatorToExportMaskPlacements - Scanning masks that contain initiator %s to see if those masks qualify for consideration for re-use", initiator.getInitiatorPort()));
// This container is for capturing those ExportMasks that we find to
// be matching based on the initiators, but unusable based on the
// StoragePorts that the mask has. Basically, in order for a mask to
// be considered a match, the initiators have to match, but the
// StoragePorts have to be in the same Network as the ExportGroup's
// VArray Network.
Map<URI, Map<String, String>> masksWithUnmatchedStoragePorts = new HashMap<URI, Map<String, String>>();
// Take a look at the ExportMask's initiators to see what compute resource that they support.
String computeResource = ExportUtils.computeResourceForInitiator(exportGroup, initiator);
List<URI> uriList = computeResourceToInitiators.get(computeResource);
List<String> portsForComputeResource = new ArrayList<String>();
Iterator<Initiator> iterator = _dbClient.queryIterativeObjects(Initiator.class, uriList);
while (iterator.hasNext()) {
portsForComputeResource.add(iterator.next().getInitiatorPort());
}
// At this point we have a non-null initiator object that we can use in the mapping
Map<URI, Integer> maskToTotalMatchingPorts = new HashMap<URI, Integer>();
int totalPorts = 0;
Set<URI> candidateExportMaskURIs = new HashSet<URI>();
Set<URI> exportMaskURIs = entry.getValue();
for (URI exportMaskURI : exportMaskURIs) {
ExportMask mask = _dbClient.queryObject(ExportMask.class, exportMaskURI);
if (mask == null || mask.getInactive()) {
continue;
}
_log.info(String.format("determineInitiatorToExportMaskPlacements - Checking to see if we can consider mask %s, given its initiators, storage ports, and volumes", mask.getMaskName()));
// Check for NO_VIPR. If found, avoid this mask.
if (mask.getMaskName() != null && mask.getMaskName().toUpperCase().contains(ExportUtils.NO_VIPR)) {
_log.info(String.format("ExportMask %s disqualified because the name contains %s (in upper or lower case) to exclude it", mask.getMaskName(), ExportUtils.NO_VIPR));
continue;
}
Map<String, String> storagePortToNetworkName = new HashMap<String, String>();
if (mask.getCreatedBySystem()) {
if (mask.getResource().equals(computeResource)) {
if (maskHasPortGroup(mask, portGroup) && maskHasStoragePortsInExportVarray(exportGroup, mask, initiator, storagePortToNetworkName)) {
_log.info(String.format("determineInitiatorToExportMaskPlacements - ViPR-created mask %s qualifies for consideration for re-use", mask.getMaskName()));
candidateExportMaskURIs.add(exportMaskURI);
totalPorts += storagePortToNetworkName.keySet().size();
maskToTotalMatchingPorts.put(exportMaskURI, totalPorts);
// Ingest fix: in the ingest case, more than one export mask with the createdBySystem
// flag set to true is possible, so the break statement below was removed.
// break; // First ViPR-created ExportMask associated with the resource, we will use it
} else {
masksWithUnmatchedStoragePorts.put(exportMaskURI, storagePortToNetworkName);
_log.info(String.format("determineInitiatorToExportMaskPlacements - ViPR-created mask %s does not qualify for consideration for re-use due to storage ports mismatch with varray.", mask.getMaskName()));
}
} else {
_log.info(String.format("determineInitiatorToExportMaskPlacements - ViPR-created mask %s does not qualify for consideration for re-use due to compute resource mismatch.", mask.getMaskName()));
}
} else if (maskHasInitiatorsBasedOnExportType(exportGroup, mask, initiator, portsForComputeResource) || maskHasInitiatorsBasedOnExportType(exportGroup, mask, allExportMaskURIs, portsForComputeResource, partialMasks)) {
if (maskHasPortGroup(mask, portGroup) && maskHasStoragePortsInExportVarray(exportGroup, mask, initiator, storagePortToNetworkName)) {
_log.info(String.format("determineInitiatorToExportMaskPlacements - Pre-existing mask %s qualifies for consideration for re-use", mask.getMaskName()));
// This is a non-ViPR create ExportMask and it has the initiator
// as an existing initiator. Add it this as a matching candidate
candidateExportMaskURIs.add(exportMaskURI);
// We don't have zone ingest information for pre-existing masks, so for the purpose of
// matching more coexistence masks, we assume there are zones from every port to every
// initiator in the mask.
// Existing Initiators - initiators which are not userAdded.
int existingInitiators = mask.getExistingInitiators() == null ? 0 : mask.getExistingInitiators().size();
int userAddedInitiators = mask.getUserAddedInitiators() == null ? 0 : mask.getUserAddedInitiators().size();
int totalInitiators = existingInitiators + userAddedInitiators;
totalPorts += storagePortToNetworkName.keySet().size() * totalInitiators;
maskToTotalMatchingPorts.put(exportMaskURI, totalPorts);
} else {
masksWithUnmatchedStoragePorts.put(exportMaskURI, storagePortToNetworkName);
_log.info(String.format("determineInitiatorToExportMaskPlacements - Pre-existing mask %s does not qualify for consideration for re-use due to storage ports mismatch with varray.", mask.getMaskName()));
}
} else {
_log.info(String.format("determineInitiatorToExportMaskPlacements - Pre-existing mask %s does not qualify for consideration for re-use due to initiators not suitable for export group type.", mask.getMaskName()));
}
}
if (!candidateExportMaskURIs.isEmpty()) {
if (validateCandidateMasksAgainstExportPathParams(exportPathParams, maskToTotalMatchingPorts)) {
_log.info(String.format("determineInitiatorToExportMaskPlacements - Initiator %s (%s) will be mapped to these ExportMask URIs: %s", portName, initiatorURI.toString(), Joiner.on(',').join(exportMaskURIs)));
initiatorToExportMaskURIMap.put(portName, candidateExportMaskURIs);
}
} else {
if (masksWithUnmatchedStoragePorts.isEmpty()) {
_log.info(String.format("determineInitiatorToExportMaskPlacements - Could not find ExportMask to which %s can be associated", portName));
} else {
// We found matching exports on the array, but they were not viable due to
// the StoragePorts used (they were pointing to a different VArray), so we should
// warn the user in this case. We will likely still attempt to make a new mask,
// so if the user doesn't prefer that, this message will tell them why we took that
// path.
StringBuilder exportMaskInfo = new StringBuilder();
for (Map.Entry<URI, Map<String, String>> maskToStoragePortsEntry : masksWithUnmatchedStoragePorts.entrySet()) {
URI exportMaskURI = maskToStoragePortsEntry.getKey();
Map<String, String> storagePortToNetworks = maskToStoragePortsEntry.getValue();
ExportMask mask = _dbClient.queryObject(ExportMask.class, exportMaskURI);
exportMaskInfo.append(String.format("MaskingView=%s StoragePorts [ %s ]%n", mask.getMaskName(), Joiner.on(',').join(storagePortToNetworks.entrySet())));
}
VirtualArray virtualArray = _dbClient.queryObject(VirtualArray.class, exportGroup.getVirtualArray());
Exception e = DeviceControllerException.exceptions.existingExportFoundButWithSPsInDifferentNetwork(virtualArray.getLabel(), exportMaskInfo.toString());
_log.warn(e.getMessage());
}
}
}
_log.info(String.format("determineInitiatorToExportMaskPlacements - initiatorToExportMaskURIMap: %s", Joiner.on(',').join(initiatorToExportMaskURIMap.entrySet())));
return initiatorToExportMaskURIMap;
}
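The returned map above is keyed by Initiator.normalizePort(...). Below is a rough stand-alone approximation of that normalization, assuming it uppercases the WWN and strips colon separators; the real ViPR implementation may differ.
public class NormalizePortSketch {
    // Assumed behavior: uppercase the port name and drop colon separators so that
    // "50:01:24:80:00:88:01:10" and "5001248000880110" key to the same map entry
    static String normalizePort(String portName) {
        return portName == null ? null : portName.replace(":", "").toUpperCase();
    }

    public static void main(String[] args) {
        System.out.println(normalizePort("50:01:24:80:00:88:01:10")); // 5001248000880110
    }
}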
use of com.emc.storageos.db.client.model.VirtualArray in project coprhd-controller by CoprHD.
the class AbstractDefaultMaskingOrchestrator method maskHasStoragePortsInExportVarray.
/**
* Routine validates if the ExportMask has StoragePorts that point to the same
* VArray as the ExportGroup's and that the Network associated with the StoragePorts
* matches those of the initiator.
*
* @param exportGroup
* [in] - ExportGroup object
* @param mask
* [in] - ExportMask object
* @param initiator
* [in] - Initiator object to validate
* @param storagePortToNetwork
* [out] - will populate the map with StoragePort.name to Network.Name
* @return true iff the ExportMask has viable StoragePorts that are associated with the ExportGroup's
* VArray and it matches the export path parameters of the ExportGroup
*/
private boolean maskHasStoragePortsInExportVarray(ExportGroup exportGroup, ExportMask mask, Initiator initiator, Map<String, String> storagePortToNetwork) {
boolean isMatched = false;
SetMultimap<URI, URI> initiatorToMatchedSP = TreeMultimap.create();
if (mask.getStoragePorts() != null) {
VirtualArray virtualArray = _dbClient.queryObject(VirtualArray.class, exportGroup.getVirtualArray());
// Look up the Initiator's network
NetworkLite initiatorNetwork = BlockStorageScheduler.lookupNetworkLite(_dbClient, StorageProtocol.block2Transport(initiator.getProtocol()), initiator.getInitiatorPort());
if (initiatorNetwork == null) {
_log.info(String.format("maskHasStoragePortsInExportVarray - Initiator %s is not in any network, returning false", initiator.getInitiatorPort()));
return false;
}
for (String uriString : mask.getStoragePorts()) {
URI uri = URI.create(uriString);
StoragePort port = _dbClient.queryObject(StoragePort.class, uri);
// Basic validation of the StoragePort
if (port == null || port.getInactive()) {
_log.info(String.format("maskHasStoragePortsInExportVarray - Could not find port or it is inactive %s", uri.toString()));
continue;
}
// StoragePort needs to be in the REGISTERED and VISIBLE status
if (!port.getRegistrationStatus().equals(StoragePort.RegistrationStatus.REGISTERED.name()) || port.getDiscoveryStatus().equals(DiscoveryStatus.NOTVISIBLE.name())) {
_log.info(String.format("maskHasStoragePortsInExportVarray - Port %s (%s) is not registered or not visible", port.getPortName(), uri.toString()));
continue;
}
// Look up the StoragePort's network
NetworkLite storagePortNetwork = BlockStorageScheduler.lookupNetworkLite(_dbClient, StorageProtocol.Transport.valueOf(port.getTransportType()), port.getPortNetworkId());
if (storagePortNetwork == null) {
_log.info(String.format("maskHasStoragePortsInExportVarray - Port %s (%s) is not associated with any network", port.getPortName(), uri.toString()));
storagePortToNetwork.put(port.getPortName(), UNASSOCIATED);
continue;
}
// Keep track of the StoragePort's network name
storagePortToNetwork.put(port.getPortName(), storagePortNetwork.getLabel());
// Port must belong to the VArray of the ExportGroup
if (!port.taggedToVirtualArray(exportGroup.getVirtualArray())) {
_log.info(String.format("maskHasStoragePortsInExportVarray - Port %s (%s) is not tagged to VArray %s (%s)", port.getPortName(), uri.toString(), virtualArray.getLabel(), exportGroup.getVirtualArray().toString()));
// Reverted the fix, as the probability of a consistent LUN violation would be higher.
continue;
}
// Check if the StoragePort and Initiator point to the same Network
if (storagePortNetwork.connectedToNetwork(initiatorNetwork.getId())) {
_log.info(String.format("maskHasStoragePortsInExportVarray - StoragePort matches: VArray=%s (%s), StoragePort=%s, Network=%s, Initiator=%s", virtualArray.getLabel(), exportGroup.getVirtualArray().toString(), port.getPortName(), storagePortNetwork.getLabel(), initiator.getInitiatorPort()));
}
// Got here, so we can update the mapping of initiators to StoragePorts
// that show a relationship through the Network and the VArray
initiatorToMatchedSP.put(initiator.getId(), port.getId());
}
}
// Validate that the ExportMask is a positive match based on the StoragePorts
// that it references and the ExportGroup's path parameters.
Set<URI> matchedSPs = initiatorToMatchedSP.get(initiator.getId());
isMatched = (matchedSPs != null && !matchedSPs.isEmpty());
_log.info(String.format("maskHasStoragePortsInExportVarray - Returning %s", isMatched));
return isMatched;
}
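The per-port gates above (exists, active, REGISTERED, VISIBLE, tagged to the varray, connected to the initiator's network) amount to a filter chain. Below is a condensed sketch with a hypothetical Port type; the network check is simplified to string equality, whereas the real code allows routed networks via connectedToNetwork.
import java.util.*;

public class PortFilterSketch {
    static class Port {
        boolean inactive, registered, visible;
        Set<String> taggedVarrays = new HashSet<>();
        String networkId;
    }

    // Returns true only when every gate the method above applies is satisfied
    static boolean usableInVarray(Port port, String varrayId, String initiatorNetworkId) {
        if (port == null || port.inactive) return false;           // basic validation
        if (!port.registered || !port.visible) return false;       // REGISTERED and VISIBLE status
        if (!port.taggedVarrays.contains(varrayId)) return false;  // tagged to the export varray
        return initiatorNetworkId.equals(port.networkId);          // same network (simplified)
    }

    public static void main(String[] args) {
        Port p = new Port();
        p.registered = true;
        p.visible = true;
        p.taggedVarrays.add("varray-1");
        p.networkId = "net-A";
        System.out.println(usableInVarray(p, "varray-1", "net-A")); // true
    }
}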
use of com.emc.storageos.db.client.model.VirtualArray in project coprhd-controller by CoprHD.
the class HostService method provisionBareMetalHosts.
/**
* Provision bare metal hosts by taking compute elements from the compute
* virtual pool.
*
* @param param
* parameter for multiple host creation
* @brief Provision bare metal hosts
* @return TaskList (asynchronous call)
* @throws DatabaseException
*/
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/provision-bare-metal")
public TaskList provisionBareMetalHosts(ProvisionBareMetalHostsParam param) throws DatabaseException {
ComputeVirtualPool cvp = _dbClient.queryObject(ComputeVirtualPool.class, param.getComputeVpool());
ArgValidator.checkEntity(cvp, param.getComputeVpool(), false);
VirtualArray varray = _dbClient.queryObject(VirtualArray.class, param.getVarray());
ArgValidator.checkEntity(varray, param.getVarray(), false);
TenantOrg tenant = _dbClient.queryObject(TenantOrg.class, param.getTenant());
ArgValidator.checkEntity(tenant, param.getTenant(), false);
if (!NullColumnValueGetter.isNullURI(param.getCluster())) {
Cluster cluster = _dbClient.queryObject(Cluster.class, param.getCluster());
ArgValidator.checkEntity(cluster, param.getCluster(), false);
}
_log.debug("checking if CVP is accessible");
_permissionsHelper.checkTenantHasAccessToComputeVirtualPool(tenant.getId(), cvp);
validateHostNames(param);
InterProcessLock lock = lockBladeReservation();
List<String> ceList = null;
try {
ceList = takeComputeElementsFromPool(cvp, param.getHostNames().size(), varray, param.getCluster());
} catch (Exception e) {
_log.error("unable to takeComputeElementsFromPool", e);
throw e;
} finally {
unlockBladeReservation(lock);
}
Set<Host> hosts = new HashSet<Host>();
for (int i = 0; i < param.getHostNames().size(); i++) {
Host host = populateHost(tenant, param.getHostNames().get(i), ceList.get(i), param.getCluster(), cvp.getId());
hosts.add(host);
_dbClient.createObject(host);
}
return createHostTasks(hosts, param.getComputeVpool(), param.getVarray());
}
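takeComputeElementsFromPool above runs under a blade-reservation lock that is released in a finally block even when the reservation fails. Below is a minimal stand-alone sketch of that acquire/use/release shape, using a plain ReentrantLock in place of the Curator InterProcessLock used by the service; the blade names are made up.
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;

public class ReservationLockSketch {
    private static final ReentrantLock BLADE_RESERVATION = new ReentrantLock();

    // Hypothetical stand-in for takeComputeElementsFromPool: throws when the pool is exhausted
    static List<String> takeComputeElements(int count) {
        if (count > 2) {
            throw new IllegalStateException("not enough free blades");
        }
        return Arrays.asList("blade-1", "blade-2").subList(0, count);
    }

    public static void main(String[] args) {
        BLADE_RESERVATION.lock();
        List<String> blades;
        try {
            blades = takeComputeElements(2);
        } finally {
            // Always release, even if reservation failed, mirroring unlockBladeReservation(lock)
            BLADE_RESERVATION.unlock();
        }
        System.out.println(blades);
    }
}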