Use of com.emc.storageos.volumecontroller.SRDFRecommendation in project coprhd-controller by CoprHD.
The class SRDFBlockServiceApiImpl, method createVolumesAndDescriptors.
@Override
public List<VolumeDescriptor> createVolumesAndDescriptors(List<VolumeDescriptor> descriptors, String volumeLabel, Long size, Project project, VirtualArray varray, VirtualPool vpool, List<Recommendation> recommendations, TaskList taskList, String task, VirtualPoolCapabilityValuesWrapper capabilities) {
List<VolumeDescriptor> volumeDescriptors = new ArrayList<VolumeDescriptor>();
// If processing SRDFCopyRecommendations, then just return the SRDFTargets.
for (Recommendation recommendation : recommendations) {
if (recommendation instanceof SRDFCopyRecommendation) {
SRDFRecommendation srdfRecommendation = (SRDFRecommendation) recommendation.getRecommendation();
// Get the Target structure
SRDFRecommendation.Target target = srdfRecommendation.getVirtualArrayTargetMap().get(recommendation.getVirtualArray());
if (target.getDescriptors() != null) {
volumeDescriptors.addAll(target.getDescriptors());
}
}
// so if we had SRDFCopyRecommendations, just return their descriptors now.
if (!volumeDescriptors.isEmpty()) {
return volumeDescriptors;
}
}
// Create a task list, if one was not provided, to track the creation operation for each volume to be created.
if (taskList == null) {
taskList = new TaskList();
}
Iterator<Recommendation> recommendationsIter;
final BlockConsistencyGroup consistencyGroup = capabilities.getBlockConsistencyGroup() == null ? null : _dbClient.queryObject(BlockConsistencyGroup.class, capabilities.getBlockConsistencyGroup());
// prepare the volumes
List<URI> volumeURIs = prepareRecommendedVolumes(task, taskList, project, varray, vpool, capabilities.getResourceCount(), recommendations, consistencyGroup, volumeLabel, size.toString());
// Execute the volume creations requests for each recommendation.
recommendationsIter = recommendations.iterator();
while (recommendationsIter.hasNext()) {
Recommendation recommendation = recommendationsIter.next();
volumeDescriptors.addAll(createVolumeDescriptors((SRDFRecommendation) recommendation, volumeURIs, capabilities));
// Log volume descriptor information
logVolumeDescriptorPrecreateInfo(volumeDescriptors, task);
}
return volumeDescriptors;
}
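Note on the SRDFCopyRecommendation branch above: it only returns descriptors that an earlier pass stashed into the parent SRDFRecommendation's Target objects (createVolumeDescriptors, shown next, does that stashing). Below is a minimal sketch of that resolution step, using only the getters that appear in these listings; copyRecommendation is a hypothetical, already-scheduled SRDFCopyRecommendation.

// Sketch: resolving the target descriptors carried by one SRDFCopyRecommendation.
SRDFRecommendation parent = (SRDFRecommendation) copyRecommendation.getRecommendation();
SRDFRecommendation.Target target =
        parent.getVirtualArrayTargetMap().get(copyRecommendation.getVirtualArray());
List<VolumeDescriptor> copyDescriptors =
        (target != null && target.getDescriptors() != null)
                ? target.getDescriptors()
                : new ArrayList<VolumeDescriptor>();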
Use of com.emc.storageos.volumecontroller.SRDFRecommendation in project coprhd-controller by CoprHD.
The class SRDFBlockServiceApiImpl, method createVolumeDescriptors.
/**
* Prep work to call the orchestrator to create the volume descriptors
*
* @param recommendation
* the SRDFRecommendation object
* @param volumeURIs
* volumes already prepared
* @param capabilities
* vpool capabilities
* @return list of volume descriptors
* @throws ControllerException
*/
private List<VolumeDescriptor> createVolumeDescriptors(final SRDFRecommendation recommendation, final List<URI> volumeURIs, final VirtualPoolCapabilityValuesWrapper capabilities) throws ControllerException {
List<VolumeDescriptor> descriptors = new ArrayList<VolumeDescriptor>();
// Package up the prepared Volumes into descriptors
for (URI volumeURI : volumeURIs) {
Volume volume = _dbClient.queryObject(Volume.class, volumeURI);
VolumeDescriptor.Type volumeType = VolumeDescriptor.Type.SRDF_SOURCE;
// If this is the vpool change volume, it is an existing source volume that has already been created.
if (recommendation.getVpoolChangeVolume() != null && recommendation.getVpoolChangeVolume().equals(volume.getId())) {
volumeType = VolumeDescriptor.Type.SRDF_EXISTING_SOURCE;
VolumeDescriptor desc = new VolumeDescriptor(volumeType, volume.getStorageController(), volume.getId(), volume.getPool(), null, capabilities, volume.getCapacity());
Map<String, Object> volumeParams = new HashMap<String, Object>();
volumeParams.put(VolumeDescriptor.PARAM_VPOOL_CHANGE_EXISTING_VOLUME_ID, recommendation.getVpoolChangeVolume());
volumeParams.put(VolumeDescriptor.PARAM_VPOOL_CHANGE_NEW_VPOOL_ID, recommendation.getVpoolChangeVpool());
volumeParams.put(VolumeDescriptor.PARAM_VPOOL_CHANGE_OLD_VPOOL_ID, volume.getVirtualPool());
desc.setParameters(volumeParams);
descriptors.add(desc);
_log.info("Adding Source Volume Descriptor for: " + desc.toString());
} else {
// Normal create-from-scratch flow
if (volume.getPersonality() == null) {
throw APIException.badRequests.srdfVolumeMissingPersonalityAttribute(volume.getId());
}
if (volume.getPersonality().equals(Volume.PersonalityTypes.TARGET.toString())) {
volumeType = VolumeDescriptor.Type.SRDF_TARGET;
}
VolumeDescriptor desc = new VolumeDescriptor(volumeType, volume.getStorageController(), volume.getId(), volume.getPool(), null, capabilities, volume.getCapacity());
descriptors.add(desc);
// If this is an SRDF target, save the descriptor in the recommendation's Target so it can be returned when the SRDFCopyRecommendation is processed.
if (volumeType == VolumeDescriptor.Type.SRDF_TARGET) {
// Find the appropriate target
SRDFRecommendation.Target target = recommendation.getVirtualArrayTargetMap().get(volume.getVirtualArray());
if (target != null) {
List<VolumeDescriptor> targetDescriptors = target.getDescriptors();
if (targetDescriptors == null) {
targetDescriptors = new ArrayList<VolumeDescriptor>();
target.setDescriptors(targetDescriptors);
}
targetDescriptors.add(desc);
} else {
_log.error("No target recommendation found in the recommendation virtualArrayTargetMap");
}
}
_log.info("Adding Non-Source Volume Descriptor for: " + desc.toString());
}
}
return descriptors;
}
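The SRDF_EXISTING_SOURCE branch above packs the vpool-change context into the descriptor's parameter map. A minimal sketch of how a consumer might read those values back, assuming VolumeDescriptor exposes a getParameters() counterpart to the setParameters() call used above; descriptor is a hypothetical SRDF_EXISTING_SOURCE descriptor produced by createVolumeDescriptors().

// Sketch: unpacking the vpool-change parameters from an SRDF_EXISTING_SOURCE descriptor.
if (descriptor.getType() == VolumeDescriptor.Type.SRDF_EXISTING_SOURCE) {
    Map<String, Object> params = descriptor.getParameters(); // assumed getter paired with setParameters()
    URI existingVolumeId = (URI) params.get(VolumeDescriptor.PARAM_VPOOL_CHANGE_EXISTING_VOLUME_ID);
    URI newVpoolId = (URI) params.get(VolumeDescriptor.PARAM_VPOOL_CHANGE_NEW_VPOOL_ID);
    URI oldVpoolId = (URI) params.get(VolumeDescriptor.PARAM_VPOOL_CHANGE_OLD_VPOOL_ID);
    // These URIs identify the existing volume and the old/new vpools for the
    // "add SRDF protection via vpool change" workflow.
}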
Use of com.emc.storageos.volumecontroller.SRDFRecommendation in project coprhd-controller by CoprHD.
The class AbstractBlockServiceApiImpl, method createVolumesAndDescriptors.
@Override
public List<VolumeDescriptor> createVolumesAndDescriptors(List<VolumeDescriptor> descriptors, String name, Long size, Project project, VirtualArray varray, VirtualPool vpool, List<Recommendation> recommendations, TaskList taskList, String task, VirtualPoolCapabilityValuesWrapper vpoolCapabilities) {
BlockServiceApi api = null;
List<VolumeDescriptor> volumeDescriptors = new ArrayList<VolumeDescriptor>();
for (Recommendation recommendation : recommendations) {
if (recommendation instanceof SRDFRecommendation || recommendation instanceof SRDFCopyRecommendation) {
api = BlockService.getBlockServiceImpl(DiscoveredDataObject.Type.srdf.name());
} else if (recommendation instanceof VolumeRecommendation) {
api = BlockService.getBlockServiceImpl(BlockServiceApi.DEFAULT);
} else {
String message = String.format("No BlockServiceApiImpl to handle recommendation of class: %s", recommendation.getClass().getName());
s_logger.error(message);
throw WorkflowException.exceptions.workflowConstructionError(message);
}
volumeDescriptors.addAll(api.createVolumesAndDescriptors(descriptors, name, size, project, varray, vpool, recommendations, taskList, task, vpoolCapabilities));
}
return volumeDescriptors;
}
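The loop above dispatches each recommendation to a BlockServiceApi implementation by type. A hypothetical helper (not part of the class) that captures the same rule, using only the calls that appear in this listing:

// Hypothetical helper: pick the BlockServiceApi implementation for a recommendation.
private BlockServiceApi resolveBlockServiceApi(Recommendation recommendation) {
    if (recommendation instanceof SRDFRecommendation || recommendation instanceof SRDFCopyRecommendation) {
        // SRDF source and SRDF copy recommendations are handled by the SRDF implementation.
        return BlockService.getBlockServiceImpl(DiscoveredDataObject.Type.srdf.name());
    }
    if (recommendation instanceof VolumeRecommendation) {
        // Plain block recommendations go to the default implementation.
        return BlockService.getBlockServiceImpl(BlockServiceApi.DEFAULT);
    }
    String message = String.format("No BlockServiceApiImpl to handle recommendation of class: %s",
            recommendation.getClass().getName());
    s_logger.error(message);
    throw WorkflowException.exceptions.workflowConstructionError(message);
}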
Use of com.emc.storageos.volumecontroller.SRDFRecommendation in project coprhd-controller by CoprHD.
The class VPlexBlockServiceApiImpl, method makeBackendVolumeDescriptors.
/**
* Takes a list of recommendations and makes the backend volumes and volume descriptors needed to
* provision. All recommendations must be in a single varray. When possible (e.g. for SRDF and Block),
* calls the underlying storage routine createVolumesAndDescriptors().
*
* @param recommendations -- a VPlex recommendation list
* @param project - Project containing the Vplex volumes
* @param vplexProject -- private project of the Vplex
* @param rootVpool -- top level Virtual Pool (VpoolUse.ROOT)
* @param varrayCount -- instance count of the varray being provisioned
* @param size -- size of each volume
* @param backendCG -- the CG to be used on the backend Storage Systems
* @param vPoolCapabilities - a VirtualPoolCapabilityValuesWrapper containing provisioning arguments
* @param createTask -- boolean if true creates a task
* @param task -- Overall task id
* @return -- list of VolumeDescriptors to be provisioned
*/
private List<VolumeDescriptor> makeBackendVolumeDescriptors(List<VPlexRecommendation> recommendations, Project project, Project vplexProject, VirtualPool rootVpool, String volumeLabel, int varrayCount, long size, BlockConsistencyGroup backendCG, VirtualPoolCapabilityValuesWrapper vPoolCapabilities, boolean createTask, String task) {
VPlexRecommendation firstRecommendation = recommendations.get(0);
List<VolumeDescriptor> descriptors = new ArrayList<VolumeDescriptor>();
URI varrayId = firstRecommendation.getVirtualArray();
VirtualPool vpool = firstRecommendation.getVirtualPool();
s_logger.info("Generating backend descriptors for {} recommendations, varray count {}", recommendations.size(), varrayCount);
vPoolCapabilities.put(VirtualPoolCapabilityValuesWrapper.AUTO_TIER__POLICY_NAME, vpool.getAutoTierPolicyName());
if (firstRecommendation.getRecommendation() != null) {
// If these recommendations have lower level recommendation, process them.
// This path is used for the source side of Distributed Volumes and for Local volumes
// where we support building on top of SRDF or the BlockStorage as appropriate.
List<Recommendation> childRecommendations = new ArrayList<Recommendation>();
Recommendation childRecommendation = null;
for (VPlexRecommendation recommendation : recommendations) {
childRecommendation = recommendation.getRecommendation();
childRecommendations.add(childRecommendation);
}
VirtualArray varray = _dbClient.queryObject(VirtualArray.class, varrayId);
String newVolumeLabel = generateVolumeLabel(volumeLabel, varrayCount, 0, 0);
boolean srdfTarget = (childRecommendation instanceof SRDFCopyRecommendation);
boolean srdfSource = (childRecommendation instanceof SRDFRecommendation);
if (srdfTarget) {
newVolumeLabel = newVolumeLabel + "-target";
} else if (srdfSource) {
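// SRDF source volumes keep the caller's project; see the RDF group comment below.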
} else {
// Nothing special about these volumes, so hide them in the vplex project.
// We can't use the vplexProject for SRDF volumes as they determine their RDF group
// from the project.
project = vplexProject;
}
TaskList taskList = new TaskList();
descriptors = super.createVolumesAndDescriptors(descriptors, newVolumeLabel, size, project, varray, vpool, childRecommendations, taskList, task, vPoolCapabilities);
VolumeDescriptor.Type[] types;
if (srdfTarget) {
types = new VolumeDescriptor.Type[] { VolumeDescriptor.Type.SRDF_TARGET };
} else {
types = new VolumeDescriptor.Type[] { VolumeDescriptor.Type.BLOCK_DATA, VolumeDescriptor.Type.SRDF_SOURCE, VolumeDescriptor.Type.SRDF_EXISTING_SOURCE };
}
descriptors = VolumeDescriptor.filterByType(descriptors, types);
for (VolumeDescriptor descriptor : descriptors) {
Volume volume = _dbClient.queryObject(Volume.class, descriptor.getVolumeURI());
s_logger.info(String.format("Received prepared volume %s (%s) type %s", volume.getLabel(), volume.getId(), descriptor.getType().name()));
volume.addInternalFlags(DataObject.Flag.INTERNAL_OBJECT);
configureCGAndReplicationGroup(rootVpool, vPoolCapabilities, backendCG, volume);
_dbClient.updateObject(volume);
}
return descriptors;
}
// Sum resourceCount across all recommendations
int totalResourceCount = 0;
for (VPlexRecommendation recommendation : recommendations) {
totalResourceCount += recommendation.getResourceCount();
}
// The code below is used for the HA side of distributed volumes.
// The HA side does not currently call the lower level schedulers to get descriptors.
s_logger.info("Processing recommendations for Virtual Array {}", varrayId);
int volumeCounter = 0;
for (VPlexRecommendation recommendation : recommendations) {
for (int i = 0; i < recommendation.getResourceCount(); i++) {
vpool = recommendation.getVirtualPool();
URI storageDeviceURI = recommendation.getSourceStorageSystem();
URI storagePoolURI = recommendation.getSourceStoragePool();
String newVolumeLabel = generateVolumeLabel(volumeLabel, varrayCount, volumeCounter, totalResourceCount);
validateVolumeLabel(newVolumeLabel, project);
s_logger.info("Volume label is {}", newVolumeLabel);
VirtualArray varray = _dbClient.queryObject(VirtualArray.class, varrayId);
// This is also handled in StorageScheduler.prepareRecommendedVolumes
long thinVolumePreAllocationSize = 0;
if (null != vpool.getThinVolumePreAllocationPercentage()) {
thinVolumePreAllocationSize = VirtualPoolUtil.getThinVolumePreAllocationSize(vpool.getThinVolumePreAllocationPercentage(), size);
}
Volume volume = prepareVolume(VolumeType.BLOCK_VOLUME, null, size, thinVolumePreAllocationSize, vplexProject, varray, vpool, storageDeviceURI, storagePoolURI, newVolumeLabel, backendCG, vPoolCapabilities);
configureCGAndReplicationGroup(rootVpool, vPoolCapabilities, backendCG, volume);
volume.addInternalFlags(Flag.INTERNAL_OBJECT);
_dbClient.persistObject(volume);
if (createTask) {
_dbClient.createTaskOpStatus(Volume.class, volume.getId(), task, ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME);
}
s_logger.info("Prepared volume {} ({}) ", volume.getLabel(), volume.getId());
VolumeDescriptor descriptor = new VolumeDescriptor(VolumeDescriptor.Type.BLOCK_DATA, storageDeviceURI, volume.getId(), storagePoolURI, backendCG == null ? null : backendCG.getId(), vPoolCapabilities, size);
descriptors.add(descriptor);
volumeCounter++;
}
}
return descriptors;
}
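Both the SRDF and VPlex listings above build VolumeDescriptor objects with the same seven-argument constructor. A commented sketch of the call from the end of this method, with each positional argument named (the variables are the ones in scope in the listing above):

// Sketch: the VolumeDescriptor constructor as used in these listings, argument by argument.
VolumeDescriptor descriptor = new VolumeDescriptor(
        VolumeDescriptor.Type.BLOCK_DATA,              // descriptor type (BLOCK_DATA, SRDF_SOURCE, SRDF_TARGET, ...)
        storageDeviceURI,                              // URI of the storage system that will own the volume
        volume.getId(),                                // URI of the prepared Volume object
        storagePoolURI,                                // URI of the backing storage pool
        backendCG == null ? null : backendCG.getId(),  // consistency group URI, or null if none
        vPoolCapabilities,                             // VirtualPoolCapabilityValuesWrapper carrying provisioning arguments
        size);                                         // requested volume size
descriptors.add(descriptor);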
Use of com.emc.storageos.volumecontroller.SRDFRecommendation in project coprhd-controller by CoprHD.
The class SRDFScheduler, method scheduleStorageSourcePoolConstraint.
/**
* Schedule storage based on the incoming storage pools for source volumes. Find a source
* storage pool that can provide a source volume that satisfies the vpool's criteria for all
* target varrays required and build a recommendation structure to describe the findings.
*
* Strategy:
*
* 0. When we come into method, we already have a list of candidate source pools, which may be
*    in multiple arrays.
* 1. Get matching pools for each of the target virtual arrays based on the target virtual pool.
* 2. Make a map of virtual array to potential pools in step 1.
* 3. Find a pool from each entry in the map that belongs to a storage system that is connected
*    via SRDF (with the same policy) to the specific candidate pool we're looking at.
* 4. Generate an SRDF Recommendation object that reflects the combination we found.
*
* @param varray
* varray requested for source
* @param srdfVarrays
* Varray to protect this volume to.
* @param vpool
* vpool requested
* @param capabilities
* parameters
* @param candidatePools
* candidate pools to use for source
* @param vpoolChangeVolume
* vpool change volume, if applicable
* @return list of Recommendation objects to satisfy the request
*/
private List<Recommendation> scheduleStorageSourcePoolConstraint(final VirtualArray varray, final Project project, final VirtualPool vpool, final VirtualPoolCapabilityValuesWrapper capabilities, final List<StoragePool> candidatePools, final Volume vpoolChangeVolume, final URI consistencyGroupUri) {
// Initialize a list of recommendations to be returned.
List<Recommendation> recommendations = new ArrayList<Recommendation>();
if (capabilities.getResourceCount() == 1) {
// For single resource request, select storage pool randomly from all candidate pools
// (to minimize collisions).
Collections.shuffle(candidatePools);
} else {
// Sort all pools in descending order by free capacity (first order) and in ascending
// order by ratio
// of pool's subscribed capacity to total capacity(suborder). This order is kept through
// the selection procedure.
_blockScheduler.sortPools(candidatePools);
}
List<VirtualArray> targetVarrays = getTargetVirtualArraysForVirtualPool(project, vpool, _dbClient, _permissionsHelper);
// Attempt to use these pools for selection based on target
StringBuffer sb = new StringBuffer("Determining if SRDF is possible from " + varray.getId() + " to: ");
for (VirtualArray targetVarray : targetVarrays) {
sb.append(targetVarray.getId()).append(" ");
}
_log.info(sb.toString());
// The port group provided belongs to the SRDF source storage system.
// If a port group is set in the capabilities, ViPR looks for storage pools only from that port
// group's storage system. Remove the PORT_GROUP entry from the capabilities for the SRDF target
// volume so that ViPR picks SRDF target storage pools from the right storage system.
capabilities.removeCapabilityEntry(VirtualPoolCapabilityValuesWrapper.PORT_GROUP);
Map<String, Object> attributeMap = new HashMap<String, Object>();
Map<VirtualArray, List<StoragePool>> varrayPoolMap = getMatchingPools(targetVarrays, vpool, capabilities, attributeMap);
if (varrayPoolMap == null || varrayPoolMap.isEmpty()) {
// No matching storage pools found for any of the target varrays. There are no target
// storage pools that match the passed vpool parameters and protocols and/or there are
// no pools that have enough
// capacity to hold at least one resource of the requested size.
Set<String> tmpTargetVarrays = new HashSet<String>();
sb = new StringBuffer("No matching storage pools found for any of the target varrays: [ ");
for (VirtualArray targetVarray : targetVarrays) {
sb.append(targetVarray.getId()).append(" ");
tmpTargetVarrays.add(targetVarray.getLabel());
}
sb.append("]. There are no storage pools that match the passed vpool parameters and protocols and/or " + "there are no pools that have enough capacity to hold at least one resource of the requested size.");
StringBuffer errorMessage = new StringBuffer();
if (attributeMap.get(AttributeMatcher.ERROR_MESSAGE) != null) {
errorMessage = (StringBuffer) attributeMap.get(AttributeMatcher.ERROR_MESSAGE);
}
_log.error(sb.toString());
throw APIException.badRequests.noMatchingRecoverPointStoragePoolsForVpoolAndVarrays(vpool.getLabel(), tmpTargetVarrays, errorMessage.toString());
}
// Reduce the source and target pool down to the pools available via target.
Set<SRDFPoolMapping> tmpDestPoolsList = getSRDFPoolMappings(varray, candidatePools, varrayPoolMap, vpool, vpoolChangeVolume, capabilities.getSize());
if (tmpDestPoolsList == null || tmpDestPoolsList.isEmpty()) {
// There are no target pools from any of the target varrays that share the
// same SRDF connectivity as any of the source varray pools. Placement cannot
// be achieved.
Set<String> tmpSRDFVarrays = new HashSet<String>();
sb = new StringBuffer("No matching target pool found for varray: ");
sb.append(varray.getId());
sb.append(" and vpool: ");
sb.append(vpool.getId());
sb.append(" to varrays: ");
for (VirtualArray targetVarray : targetVarrays) {
sb.append(targetVarray.getId()).append(" ");
tmpSRDFVarrays.add(targetVarray.getLabel());
}
// No matching target pool found for varray so throw an exception
// indicating a placement error.
_log.error(sb.toString());
throw APIException.badRequests.noMatchingSRDFPools(varray.getLabel(), vpool.getLabel(), tmpSRDFVarrays);
}
// Fire business rules to determine which SRDFPoolMappings can be eliminated
// from consideration for placement.
Set<SRDFPoolMapping> srcDestPoolsList = fireSRDFPlacementRules(tmpDestPoolsList, capabilities.getResourceCount());
// If no source/target pool mappings survive the business rules for this volume request configuration, throw an exception.
if (srcDestPoolsList == null || srcDestPoolsList.isEmpty()) {
throw APIException.badRequests.srdfNoSolutionsFoundError();
}
// Get a new source pool list for pool selection
Set<StoragePool> sourceCandidatePoolList = new HashSet<StoragePool>();
for (SRDFPoolMapping srdfPoolMapping : srcDestPoolsList) {
sourceCandidatePoolList.add(srdfPoolMapping.sourceStoragePool);
}
// Try with the storagePoolList as it currently is.
// If we get through the process and couldn't achieve full target, we should
// take out the matched pool from the storagePoolList and try again.
List<StoragePool> sourcePoolList = new ArrayList<StoragePool>();
sourcePoolList.addAll(sourceCandidatePoolList);
// We need to create recommendations for one or more pools
// that can accommodate the number of requested resources.
// We start by trying to place all resources in a single
// pool if one exists that can accommodate all requested
// resources and work our way down as necessary trying to
// minimize the number of pools used to satisfy the request.
int recommendedCount = 0;
int currentCount = capabilities.getResourceCount();
// Loop until we run out of candidate source pools or we have satisfied all of the requests.
while (!sourcePoolList.isEmpty() && recommendedCount < capabilities.getResourceCount()) {
// This request will either decrement the count OR shrink the sourcePoolList
// In the case of decrementing the count, it's because it was successful at
// placing volume(s). If it wasn't, that source pool goes in the trash and we
// try the next one.
long resourceSize = capabilities.getSize();
int resourceCount = capabilities.getResourceCount();
// We need to find a pool that matches the capacity for all the source/target luns
long requiredPoolCapacity = resourceSize * currentCount;
_log.info("Required pool capacity: " + requiredPoolCapacity);
StoragePool poolWithRequiredCapacity = _blockScheduler.getPoolMatchingCapacity(requiredPoolCapacity, resourceSize, currentCount, sourcePoolList, VirtualPool.ProvisioningType.Thin.toString().equalsIgnoreCase(vpool.getSupportedProvisioningType()), null);
// If a pool with the required capacity was found, build a recommendation around that pool, setting the resource count for that recommendation.
if (poolWithRequiredCapacity != null) {
StoragePool recommendedPool = poolWithRequiredCapacity;
_log.debug("Recommending storage pool {} for {} resources.", recommendedPool.getId(), currentCount);
// Now we know what pool was selected, we can grab the target pools that jive with that
// source
Map<VirtualArray, List<StoragePool>> targetVarrayPoolMap = findDestPoolsForSourcePool(targetVarrays, srcDestPoolsList, recommendedPool, vpool);
if (targetVarrayPoolMap == null || targetVarrayPoolMap.isEmpty()) {
// A valid source pool was found but there are no pools from any of the
// target varrays that can protect it.
_log.info("There are no pools from any of the target varrays that can protect the source " + "varray pool {}. Will try using another source varray pool.", recommendedPool.getLabel());
// Remove the source pool and try the next one.
sourcePoolList.remove(poolWithRequiredCapacity);
} else {
// A single recommendation object will create a set of volumes for an SRDF pair.
SRDFRecommendation rec = new SRDFRecommendation();
// For each target varray, we start the process of matching source and destination
// pools to one storage system.
Map<VirtualArray, Set<StorageSystem>> varrayTargetDeviceMap = new HashMap<VirtualArray, Set<StorageSystem>>();
for (VirtualArray targetVarray1 : targetVarrayPoolMap.keySet()) {
if (rec.getSourceStoragePool() == null) {
rec.setVirtualArray(varray.getId());
rec.setVirtualPool(vpool);
rec.setSourceStoragePool(recommendedPool.getId());
rec.setResourceCount(currentCount);
rec.setSourceStorageSystem(recommendedPool.getStorageDevice());
rec.setVirtualArrayTargetMap(new HashMap<URI, Target>());
rec.setVpoolChangeVolume(vpoolChangeVolume != null ? vpoolChangeVolume.getId() : null);
rec.setVpoolChangeVpool(vpoolChangeVolume != null ? vpool.getId() : null);
}
if (targetVarrayPoolMap.get(targetVarray1) == null || targetVarrayPoolMap.get(targetVarray1).isEmpty()) {
_log.error("Could not find any suitable storage pool for target varray: " + targetVarray1.getLabel());
throw APIException.badRequests.unableToFindSuitablePoolForTargetVArray(targetVarray1.getLabel());
}
// Select the destination pool based on what was selected as source
StoragePool destinationPool = _blockScheduler.selectPool(targetVarrayPoolMap.get(targetVarray1));
_log.info("Destination target for varray " + targetVarray1.getLabel() + " was determined to be in pool: " + destinationPool.getLabel());
Target target = new Target();
target.setTargetPool(destinationPool.getId());
target.setTargetStorageDevice(destinationPool.getStorageDevice());
// Set the copy mode
Map<URI, VpoolRemoteCopyProtectionSettings> settingsMap = VirtualPool.getRemoteProtectionSettings(vpool, _dbClient);
target.setCopyMode(settingsMap.get(targetVarray1.getId()).getCopyMode());
if (target.getCopyMode() == null) {
// Set the default if not set
target.setCopyMode(RemoteDirectorGroup.SupportedCopyModes.ASYNCHRONOUS.toString());
}
// Generate a list of storage systems that match the src and dest pools lists.
Set<StorageSystem> targetDeviceList = findMatchingSRDFPools(targetVarray1, srcDestPoolsList, recommendedPool, destinationPool);
if (targetDeviceList.isEmpty()) {
_log.error("Could not find a Storage pool for target varray: " + targetVarray1.getLabel());
throw APIException.badRequests.unableToFindSuitablePoolForTargetVArray(targetVarray1.getLabel());
}
rec.getVirtualArrayTargetMap().put(targetVarray1.getId(), target);
// Add this potential solution to the map.
varrayTargetDeviceMap.put(targetVarray1, targetDeviceList);
}
// Grab any element since all varrays need to have the same SRDF connectivity.
VirtualArray firstVarray = null;
for (VirtualArray baseVarray : varrayTargetDeviceMap.keySet()) {
firstVarray = baseVarray;
break;
}
_log.info("Chose the first varray for SRDF comparison: " + firstVarray.getLabel());
// Now go through each storage system in this varray and see if it matches up
findInsertRecommendation(rec, firstVarray, recommendations, candidatePools, recommendedPool, varrayTargetDeviceMap, project, consistencyGroupUri);
// Update the count of resources for which we have created
// a recommendation.
recommendedCount += currentCount;
// Update the current count. The conditional prevents
// unnecessary attempts to look for pools of a given
// free capacity that we already know don't exist. For
// example, say we want 100 resources and the first pool
// we find that can hold multiple resources can hold only
// 10. We don't want to continue looking for pools that
// can hold 90,89,88,...11 resources. We just want to
// see if there is another pool that can hold 10 resources,
// then 9,8, and so on.
currentCount = resourceCount - recommendedCount < currentCount ? resourceCount - recommendedCount : currentCount;
}
} else {
// If we can't find a pool that can hold the current
// count of resources, decrease the count so that we look
// for pools that can hold the next smaller number.
currentCount--;
// Clear out the source pool list (which will cause failure)
sourcePoolList.clear();
}
// If we could not place all of the requested resources in this pass, log an error and clear the list of recommendations.
if (recommendedCount != resourceCount) {
_log.error("Could not find matching pools for varray {} & vpool {}", varray.getId(), vpool.getId());
recommendations.clear();
// Remove the pool we chose from the list so we can try again.
sourcePoolList.remove(poolWithRequiredCapacity);
}
}
return recommendations;
}
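For reference, the recommendation assembled inside the placement loop above ends up with one Target entry per protected virtual array. A minimal sketch of that shape for a single source pool protecting to one target varray, using only the setters shown in this listing; all identifiers here are hypothetical placeholders.

// Sketch: shape of one SRDFRecommendation produced by scheduleStorageSourcePoolConstraint().
SRDFRecommendation rec = new SRDFRecommendation();
rec.setVirtualArray(sourceVarrayId);            // varray requested for the source
rec.setVirtualPool(vpool);                      // vpool requested
rec.setSourceStoragePool(sourcePoolId);         // selected source storage pool
rec.setSourceStorageSystem(sourceSystemId);     // array that owns the source pool
rec.setResourceCount(volumeCount);              // number of volumes covered by this recommendation
rec.setVirtualArrayTargetMap(new HashMap<URI, SRDFRecommendation.Target>());

SRDFRecommendation.Target target = new SRDFRecommendation.Target();
target.setTargetPool(targetPoolId);             // selected target storage pool
target.setTargetStorageDevice(targetSystemId);  // array that owns the target pool
target.setCopyMode(RemoteDirectorGroup.SupportedCopyModes.ASYNCHRONOUS.toString());
rec.getVirtualArrayTargetMap().put(targetVarrayId, target);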