Use of com.emc.storageos.volumecontroller.impl.utils.ObjectLocalCache in the CoprHD project coprhd-controller.
Class VirtualArrayService, method getAvailableAttributes (bulk POST variant).
/**
 * Finds the available attributes & its values in a varray. Ex: In a
 * varray, if a system supports raid_levels such as RAID1, RAID2 then
 * this API call provides the supported information.
 *
 * @param param bulk parameter carrying the URIs of the varrays to inspect.
 * @brief List available attributes for all VirtualArrays
 * @return List available attributes for all VirtualArrays
 */
@POST
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/available-attributes")
@CheckPermission(roles = { Role.SYSTEM_ADMIN, Role.SYSTEM_MONITOR })
public VArrayAttributeList getAvailableAttributes(BulkIdParam param) {
    // Log the requested varray ids; the original message had a dangling {}
    // placeholder with no argument, so it logged a literal "{}".
    _log.info("Finding the available attributes for varrays: {}", param.getIds());
    VArrayAttributeList vArrayAttributes = new VArrayAttributeList();
    ObjectLocalCache cache = new ObjectLocalCache(_dbClient);
    try {
        // Fetch the pools for all requested varrays once, sharing the cache
        // across the per-varray attribute matching below.
        Map<URI, List<StoragePool>> allPools = getVirtualArrayPools(param.getIds(), cache);
        for (Map.Entry<URI, List<StoragePool>> varrEntry : allPools.entrySet()) {
            Map<String, Set<String>> availableAttrs = _matcherFramework.getAvailableAttributes(
                    varrEntry.getKey(), varrEntry.getValue(), cache, AttributeMatcher.VPOOL_MATCHERS);
            AttributeList list = new AttributeList();
            list.setVArrayId(varrEntry.getKey());
            for (Map.Entry<String, Set<String>> entry : availableAttrs.entrySet()) {
                list.getAttributes().add(
                        new VirtualPoolAvailableAttributesResourceRep(entry.getKey(), entry.getValue()));
            }
            // Only include varrays that contribute at least one attribute.
            if (!list.getAttributes().isEmpty()) {
                vArrayAttributes.getAttributes().add(list);
            }
        }
    } finally {
        // Release cached DB objects even if pool lookup or matching throws.
        cache.clearCache();
    }
    return vArrayAttributes;
}
Use of com.emc.storageos.volumecontroller.impl.utils.ObjectLocalCache in the CoprHD project coprhd-controller.
Class VirtualArrayService, method getAvailableAttributes (single-varray GET variant).
/**
 * Finds the available attributes & its values in a varray. Ex: In a
 * varray, if a system supports raid_levels such as RAID1, RAID2 then
 * this API call provides the supported information.
 *
 * @param id the URN of a ViPR VirtualArray.
 * @brief List available attributes for VirtualArray
 * @return List available attributes for VirtualArray
 */
@GET
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/{id}/available-attributes")
@CheckPermission(roles = { Role.SYSTEM_ADMIN, Role.SYSTEM_MONITOR })
public AttributeList getAvailableAttributes(@PathParam("id") URI id) {
    // Validate that the id is a well-formed varray URI and resolves to an entity.
    ArgValidator.checkFieldUriType(id, VirtualArray.class, "id");
    VirtualArray varray = _dbClient.queryObject(VirtualArray.class, id);
    ArgValidator.checkEntityNotNull(varray, id, isIdEmbeddedInURL(id));
    _log.info("Finding the available attributes for varray: {}", id);

    AttributeList result = new AttributeList();
    result.setVArrayId(id);

    // Look up the varray's pools through a request-scoped cache, then ask the
    // matcher framework which vpool attributes those pools can satisfy.
    ObjectLocalCache localCache = new ObjectLocalCache(_dbClient);
    List<StoragePool> varrayPools = getVirtualArrayPools(Arrays.asList(id), localCache).get(id);
    Map<String, Set<String>> attrsByName = _matcherFramework.getAvailableAttributes(
            id, varrayPools, localCache, AttributeMatcher.VPOOL_MATCHERS);
    localCache.clearCache();

    // Translate each attribute name / value-set pair into a REST representation.
    for (Map.Entry<String, Set<String>> attr : attrsByName.entrySet()) {
        result.getAttributes().add(
                new VirtualPoolAvailableAttributesResourceRep(attr.getKey(), attr.getValue()));
    }
    return result;
}
Use of com.emc.storageos.volumecontroller.impl.utils.ObjectLocalCache in the CoprHD project coprhd-controller.
Class SRDFScheduler, method filterPoolsForSupportedActiveModeProvider.
/**
 * Filters the candidate pools for SRDF Metro (ACTIVE copy mode) support. If any
 * remote copy protection setting on the vpool requests ACTIVE mode, the pools are
 * passed through the SRDFMetroMatcher; otherwise the candidates are returned as-is.
 *
 * @param candidatePools the pools under consideration; returned unchanged when no
 *            ACTIVE-mode remote setting is present.
 * @param vpool the virtual pool whose remote protection settings are inspected.
 * @return the (possibly filtered) list of candidate pools.
 */
private List<StoragePool> filterPoolsForSupportedActiveModeProvider(List<StoragePool> candidatePools, VirtualPool vpool) {
    Map<URI, VpoolRemoteCopyProtectionSettings> remoteProtectionSettings = vpool.getRemoteProtectionSettings(vpool, _dbClient);
    if (remoteProtectionSettings != null) {
        // Iterate entries directly rather than keySet() + get() — avoids a
        // redundant map lookup per varray.
        for (Map.Entry<URI, VpoolRemoteCopyProtectionSettings> settingsEntry : remoteProtectionSettings.entrySet()) {
            String copyMode = settingsEntry.getValue().getCopyMode();
            // Constant-first comparison guards against a null copy mode, which
            // would have thrown NPE in the original copyMode.equals(...) form.
            if (SupportedCopyModes.ACTIVE.toString().equals(copyMode)) {
                SRDFMetroMatcher srdfMetroMatcher = new SRDFMetroMatcher();
                srdfMetroMatcher.setCoordinatorClient(_coordinator);
                // Matcher gets its own non-clearing local cache for DB reads.
                srdfMetroMatcher.setObjectCache(new ObjectLocalCache(_dbClient, false));
                return srdfMetroMatcher.filterPoolsForSRDFActiveMode(candidatePools);
            }
        }
    }
    return candidatePools;
}
Use of com.emc.storageos.volumecontroller.impl.utils.ObjectLocalCache in the CoprHD project coprhd-controller.
Class StorageScheduler, method getPoolsMatchingCapacity.
/**
 * Returns all storage pools from the passed list of candidate storage
 * pools that have at least the passed free capacity.
 * Note: do not change order of candidate pools.
 *
 * @param capacity The desired free capacity.
 * @param resourceSize The desired resource size
 * @param newResourceCount The desired number of resources
 * @param candidatePools The list of candidate storage pools.
 * @param isThinlyProvisioned Indication if this is thin provisioning (thin volume).
 * @param thinVolumePreAllocationResourceSize The pre-allocation size per thin
 *            resource, or null when no pre-allocation is requested.
 *
 * @return All storage pools that have the passed free capacity.
 */
protected List<StoragePool> getPoolsMatchingCapacity(long capacity, long resourceSize, Integer newResourceCount, List<StoragePool> candidatePools, boolean isThinlyProvisioned, Long thinVolumePreAllocationResourceSize) {
    List<StoragePool> poolsWithCapacity = new ArrayList<StoragePool>();
    // One matcher instance is reused across all candidates; it only needs the
    // coordinator and a non-clearing local DB cache.
    CapacityMatcher capacityMatcher = new CapacityMatcher();
    capacityMatcher.setCoordinatorClient(_coordinator);
    capacityMatcher.setObjectCache(new ObjectLocalCache(_dbClient, false));
    for (StoragePool candidatePool : candidatePools) {
        // First check if max Resources limit is violated for the pool.
        if (MaxResourcesMatcher.checkPoolMaximumResourcesApproached(candidatePool, _dbClient, newResourceCount)) {
            continue;
        }
        // Keep insertion order identical to the candidate order (see note above).
        if (capacityMatcher.poolMatchesCapacity(candidatePool, capacity, resourceSize, false, isThinlyProvisioned, thinVolumePreAllocationResourceSize)) {
            poolsWithCapacity.add(candidatePool);
        }
    }
    return poolsWithCapacity;
}
Use of com.emc.storageos.volumecontroller.impl.utils.ObjectLocalCache in the CoprHD project coprhd-controller.
Class VmaxMaskingOrchestrator, method suggestExportMasksForPlacement.
/**
 * Suggests candidate ExportMasks (VMAX MaskingViews) for placing the volumes carried
 * by the descriptor. VMAX can have multiple ExportMasks that contain the same set of
 * initiators, so this implementation reads the matching masks from the array, filters
 * out those without usable varray ports, runs the VPlex-backend placement rules, and
 * records per-volume placements (plus equivalent alternates) in the descriptor.
 *
 * @param storage the VMAX storage system being examined.
 * @param device the block device interface used to read masking state from the array.
 * @param initiators the initiators the export must cover.
 * @param descriptor in/out: carries the volumes to place and receives the suggestions.
 */
@Override
protected void suggestExportMasksForPlacement(StorageSystem storage, BlockStorageDevice device, List<Initiator> initiators, ExportMaskPlacementDescriptor descriptor) {
    // VMAX can have multiple ExportMasks (MaskingViews) that contain the same set of initiators.
    // So, it's up to this implementation to determine where to best place the volumes based
    // on volume and ExportMask characteristics. To that end, we will hint that the placement
    // will be separating the volumes per ExportMask
    descriptor.setPlacementHint(ExportMaskPlacementDescriptor.PlacementHint.VOLUMES_TO_SEPARATE_MASKS);
    // Find all the ExportMasks on the array that have the initiators (or a subset of them)
    Map<URI, ExportMask> matchingMasks = readExistingExportMasks(storage, device, initiators);
    // filter out export masks which do not have at least one port from the virtual array
    Set<URI> invalidMasks = new HashSet<URI>();
    VirtualArray virtualArray = _dbClient.queryObject(VirtualArray.class, descriptor.getVirtualArray());
    for (Entry<URI, ExportMask> entry : matchingMasks.entrySet()) {
        ExportMask mask = entry.getValue();
        boolean matched = maskHasStoragePortsInExportVarray(virtualArray, mask);
        if (!matched) {
            invalidMasks.add(entry.getKey());
            _log.info("Mask does not have valid ports from varray: {}", mask.getLabel());
        }
    }
    // Removal is deferred to avoid ConcurrentModificationException during iteration.
    for (URI maskUri : invalidMasks) {
        matchingMasks.remove(maskUri);
    }
    // set matching masks in the descriptor
    descriptor.setMasks(matchingMasks);
    if (matchingMasks.isEmpty()) {
        return;
    }
    // Dummy/non-essential data: InitiatorHelper requires an ExportGroup for context only.
    ExportGroup dummyExportGroup = new ExportGroup();
    dummyExportGroup.setType(ExportGroupType.Host.name());
    // InitiatorHelper for processing the initiators
    InitiatorHelper initiatorHelper = new InitiatorHelper(initiators).process(dummyExportGroup);
    // Create and fill in:
    // -- Mapping of initiators to ExportMask URIs
    // -- Set of ExportMasks that match partially to the initiator set
    // -- Mapping of ExportMask URI to ExportMaskPolicy
    Set<URI> partialMasks = new HashSet<>();
    Map<String, Set<URI>> initiatorToExportMaskMap = new HashMap<>();
    Map<URI, ExportMaskPolicy> policyCache = new HashMap<>();
    Collection<String> portNames = Collections2.transform(initiators, CommonTransformerFunctions.fctnInitiatorToPortName());
    for (Map.Entry<URI, ExportMask> entry : matchingMasks.entrySet()) {
        URI exportMaskURI = entry.getKey();
        ExportMask exportMask = entry.getValue();
        // Get the ExportMaskPolicy, thereby saving it to the policyCache. The policyCache is a mapping of the ExportMask URI to
        // its ExportMaskPolicy object. The ExportMaskPolicy is a transient object that is used to hold meta data about the ExportMask.
        // This meta data is mostly there to describe the AutoTieringPolicy, Host IO parameters, and InitiatorGroup usage.
        // There could be other information, as well. It suffices to understand that this data is relevant for the rules applicator
        // that we're invoking below. The rules applicator will use this as a way to determine which ExportMask is best
        // suited to hold the volumes.
        getExportMaskPolicy(policyCache, device, storage, exportMask);
        // Populate the mapping of Initiator portname (WWN/IQN) to the ExportMask URI
        for (String portName : portNames) {
            Set<URI> masks = initiatorToExportMaskMap.get(portName);
            if (masks == null) {
                masks = new HashSet<>();
                initiatorToExportMaskMap.put(portName, masks);
            }
            masks.add(exportMaskURI);
        }
        // If the mask does not hold exactly the initiators we were asked to
        // export to, then we need to put it in the set of masks that have a partial match
        if (!ExportMaskUtils.hasExactlyTheseInitiators(exportMask, portNames, _dbClient)) {
            partialMasks.add(exportMaskURI);
        }
        // Determine which ExportMasks are equivalent in terms of attributes, other than
        // the number of volumes that they contain. The preference is for the rules
        // applicator (below) to choose, from equivalent masks, the one with the least
        // volumes. But we'd like to still know which are equivalent in case the mask
        // that is selected in the code here, is invalid in some higher level validation.
        descriptor.addToEquivalentMasks(exportMask, policyCache.get(exportMaskURI));
    }
    // Populate the Volume URI to Volume HLU mapping. We will let the array decide the HLUs (i.e., set it to -1)
    Map<URI, Integer> volumeMap = new HashMap<>();
    for (URI volumeURI : descriptor.getVolumesToPlace().keySet()) {
        volumeMap.put(volumeURI, -1);
    }
    // Mapping of ExportMask URI to Volume-HLU: the basic output that we're expecting to be filled in by the rules applicator
    Map<URI, Map<URI, Integer>> maskToUpdateWithNewVolumes = new HashMap<>();
    // All data structures should have been filled in at this point; create the context and ruleApplicator for it
    VmaxVolumeToExportMaskApplicatorContext context = createVPlexBackendApplicatorContext(dummyExportGroup, storage, policyCache, initiatorHelper, initiatorToExportMaskMap, partialMasks, volumeMap, maskToUpdateWithNewVolumes);
    VplexBackendVolumeToExportMaskRuleApplicator rulesApplicator = new VplexBackendVolumeToExportMaskRuleApplicator(_dbClient, context);
    try {
        rulesApplicator.run();
        if (context.resultSuccess) {
            // Get configuration value for how many volumes are allowed in MaskingView. If the number
            // of volumes exceeds this amount for a particular ExportMask, then it cannot be a candidate
            // for reuse.
            customConfigHandler = (CustomConfigHandler) ControllerServiceImpl.getBean(CUSTOM_CONFIG_HANDLER);
            int maxVolumesAllowedByConfig = Integer.valueOf(customConfigHandler.getComputedCustomConfigValue(CustomConfigConstants.VPLEX_VMAX_MASKING_VIEW_MAXIMUM_VOLUMES, storage.getSystemType(), null));
            // Use a local cache in case the same volumes are selected
            // to be placed into different ExportMasks
            ObjectLocalCache cache = new ObjectLocalCache(_dbClient);
            // Process each entry in the mapping of ExportMask to Volumes ...
            for (Map.Entry<URI, Map<URI, Integer>> entry : maskToUpdateWithNewVolumes.entrySet()) {
                URI exportMaskURI = entry.getKey();
                Set<URI> volumeURIs = entry.getValue().keySet();
                // The ExportMaskPolicy is a transient object that is used to hold meta data about the ExportMask.
                // This meta data is mostly there to describe the AutoTieringPolicy, Host IO parameters, and InitiatorGroup usage.
                // There could be other information, as well.
                ExportMaskPolicy policy = policyCache.get(exportMaskURI);
                // Translate the Volume URI to Volume HLU map to a Volume URI to Volume object map:
                Map<URI, Volume> volumes = new HashMap<>();
                List<Volume> queriedVols = cache.queryObject(Volume.class, volumeURIs);
                for (Volume volume : queriedVols) {
                    volumes.put(volume.getId(), volume);
                }
                // TODO: We need to explore if we should/can make the volume count check (done below) another rule run as
                // part of the RuleApplicator. The one concern with doing this is what would happen if another ExportMask
                // is selected. Would we end up selecting an ExportMask that can support the volumes, but is undesirable
                // through some other considerations. For now, we will let the rules engine decide the appropriate
                // ExportMasks and then evaluate that selection for the volume count.
                // Validate the number of volumes that will be in the ExportMask if 'volumes' were added to it.
                // If there are more the maximum number of volumes allowed, then we should not place these 'volumes'.
                // The volumes would show up in the descriptor as being unplaced. The VplexBackendManager would
                // to take care of this case by creating another ExportMask to contain these volumes.
                ExportMask exportMask = matchingMasks.get(exportMaskURI);
                int totalVolumesWhenAddedToExportMask = exportMask.returnTotalVolumeCount() + volumes.size();
                boolean moreVolumesThanAllowedByConfig = totalVolumesWhenAddedToExportMask > maxVolumesAllowedByConfig;
                boolean moreVolumesThanAllowedByArray = totalVolumesWhenAddedToExportMask > policy.getMaxVolumesAllowed();
                if (moreVolumesThanAllowedByArray || moreVolumesThanAllowedByConfig) {
                    _log.info(String.format("ExportMask %s (%s) is matching, but already %d volumes associated with it. " + "Adding %d volumes to it will make it go over its limit, hence it will not be used for placement.%n" + "The configuration allows %d volumes and the array allows %d as the max number of volumes to a MaskingView", exportMask.getMaskName(), exportMask.getId(), exportMask.returnTotalVolumeCount(), volumes.size(), maxVolumesAllowedByConfig, policy.getMaxVolumesAllowed()));
                    continue;
                }
                // Fill in the descriptor to be used for VPlex backend placement
                descriptor.placeVolumes(exportMaskURI, volumes);
                // Record each equivalent mask as an alternative export for the volume,
                // in case the selected mask is later found to
                // be invalid (by some higher-level validation).
                // BUGFIX: the original passed exportMaskURI here, re-adding the selected
                // mask itself once per equivalent and never recording the alternates.
                for (URI volumeURI : volumes.keySet()) {
                    for (URI equivalentExport : descriptor.getEquivalentExportMasks(exportMaskURI)) {
                        descriptor.addAsAlternativeExportForVolume(volumeURI, equivalentExport);
                    }
                }
            }
        }
    } catch (Exception e) {
        _log.error("Exception while trying to get suggestions for ExportMasks to used for volumes", e);
    }
}
End of aggregated ObjectLocalCache usage examples.