use of com.emc.storageos.db.client.model.VpoolRemoteCopyProtectionSettings in project coprhd-controller by CoprHD.
the class BlockRemoteReplicationIngestOrchestrator method validateSourceVolumeVarrayWithTargetVPool.
/**
* Validates the source volume's VArray details against the VArrays of the already
* ingested target volumes.
*
* @param unManagedVolume the unmanaged source volume being ingested
* @param sourceVPool the virtual pool of the source volume
*/
private void validateSourceVolumeVarrayWithTargetVPool(UnManagedVolume unManagedVolume, VirtualPool sourceVPool) {
StringSetMap unManagedVolumeInformation = unManagedVolume.getVolumeInformation();
// find whether all targets are ingested
StringSet targetUnManagedVolumeGuids = unManagedVolumeInformation.get(SupportedVolumeInformation.REMOTE_MIRRORS.toString());
if (null != targetUnManagedVolumeGuids && !targetUnManagedVolumeGuids.isEmpty()) {
StringSet targetVolumeNativeGuids = VolumeIngestionUtil.getListofVolumeIds(targetUnManagedVolumeGuids);
// check whether target exists
List<URI> targetUris = VolumeIngestionUtil.getVolumeUris(targetVolumeNativeGuids, _dbClient);
if (null == targetUris || targetUris.isEmpty()) {
_logger.info("None of the targets ingested for source volume: {}", unManagedVolume.getNativeGuid());
} else {
List<Volume> targetVolumes = _dbClient.queryObject(Volume.class, targetUris);
for (Volume targetVolume : targetVolumes) {
Map<URI, VpoolRemoteCopyProtectionSettings> settings = VirtualPool.getRemoteProtectionSettings(sourceVPool, _dbClient);
if (null == settings || settings.isEmpty() || !settings.containsKey(targetVolume.getVirtualArray())) {
_logger.info("Target volume's VArray {} does not match any remote VArray ({}) of the already ingested source volume's virtual pool", targetVolume.getVirtualArray(), null == settings ? "" : Joiner.on(",").join(settings.keySet()));
throw IngestionException.exceptions.unmanagedSRDFSourceVolumeVArrayMismatch(unManagedVolume.getLabel(), targetVolume.getVirtualArray().toString());
}
}
}
}
}
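Taken on its own, the varray check above reduces to a containment test on the remote-protection settings map, which is keyed by the remote virtual array URI. A minimal sketch of that check in isolation, assuming the static VirtualPool.getRemoteProtectionSettings accessor used elsewhere on this page; the helper name is illustrative, not part of CoprHD:

    // Illustrative helper (hypothetical name): does the source vpool replicate to the
    // given target varray? Mirrors the containsKey check in the orchestrator above.
    private boolean targetVarrayIsRemoteCopyOf(VirtualPool sourceVPool, URI targetVarray, DbClient dbClient) {
        Map<URI, VpoolRemoteCopyProtectionSettings> settings =
                VirtualPool.getRemoteProtectionSettings(sourceVPool, dbClient);
        // The map is keyed by the remote virtual array URI of each configured SRDF copy.
        return settings != null && settings.containsKey(targetVarray);
    }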
use of com.emc.storageos.db.client.model.VpoolRemoteCopyProtectionSettings in project coprhd-controller by CoprHD.
the class RemoteReplicationIngestor method runRemoteReplicationStepsOnTarget.
/**
* If the unmanaged volume is a target volume:
* 1. Find whether its source volume has been ingested.
* 2. If yes, find whether all other expected targets of that source (excluding the current target) have already been ingested.
* 3. If yes, establish the SRDF links between the source and its targets.
* 4. If not, make sure the unmanaged source volume hasn't been deleted.
*
* @param unManagedVolume the unmanaged target volume being ingested
* @param volume the ingested target volume
* @param unManagedVolumes list collecting unmanaged volumes to be marked inactive
* @param type the replication type being processed
* @param dbClient database client
* @return true if the links were established and the target's unmanaged volume can be removed
*/
@SuppressWarnings("deprecation")
private static boolean runRemoteReplicationStepsOnTarget(UnManagedVolume unManagedVolume, Volume volume, List<UnManagedVolume> unManagedVolumes, String type, DbClient dbClient) {
boolean removeUnManagedVolume = false;
StringSetMap unManagedVolumeInformation = unManagedVolume.getVolumeInformation();
String sourceUnManagedVolumeId = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.REMOTE_MIRROR_SOURCE_VOLUME.toString(), unManagedVolumeInformation);
_logger.info("Type {} Source Native Guid {}", type, sourceUnManagedVolumeId);
String sourceVolumeId = sourceUnManagedVolumeId.replace(VolumeIngestionUtil.UNMANAGEDVOLUME, VolumeIngestionUtil.VOLUME);
List<URI> sourceUris = dbClient.queryByConstraint(AlternateIdConstraint.Factory.getVolumeNativeGuidConstraint(sourceVolumeId));
String copyMode = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.REMOTE_COPY_MODE.toString(), unManagedVolumeInformation);
String raGroup = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.REMOTE_MIRROR_RDF_GROUP.toString(), unManagedVolumeInformation);
volume.setSrdfCopyMode(copyMode);
volume.setSrdfGroup(URI.create(raGroup));
if (sourceUris.isEmpty()) {
_logger.info("Source {} Not found for target {}", sourceVolumeId, volume.getId());
} else {
// check whether all targets of the source are ingested
List<URI> sourceUnmanagedUris = dbClient.queryByConstraint(AlternateIdConstraint.Factory.getVolumeInfoNativeIdConstraint(sourceUnManagedVolumeId));
if (!sourceUnmanagedUris.isEmpty()) {
UnManagedVolume sourceUnManagedVolume = dbClient.queryObject(UnManagedVolume.class, sourceUnmanagedUris.get(0));
if (null != sourceUnManagedVolume) {
StringSet targetUnManagedVolumeGuids = sourceUnManagedVolume.getVolumeInformation().get(SupportedVolumeInformation.REMOTE_MIRRORS.toString());
if (null != targetUnManagedVolumeGuids && !targetUnManagedVolumeGuids.isEmpty()) {
StringSet targetVolumeNativeGuids = VolumeIngestionUtil.getListofVolumeIds(targetUnManagedVolumeGuids);
List<URI> targetUris = VolumeIngestionUtil.getVolumeUris(targetVolumeNativeGuids, dbClient);
targetUris.add(volume.getId());
_logger.info("Expected targets Size {} , found {} ", targetUnManagedVolumeGuids.size(), targetUris.size());
_logger.debug("Expected Targets {} : Found {}", Joiner.on("\t").join(targetVolumeNativeGuids), Joiner.on("\t").join(targetUris));
List<Volume> modifiedVolumes = new ArrayList<Volume>();
if (targetUris.size() == targetUnManagedVolumeGuids.size()) {
// if all other targets are ingested, then
Volume sourceVolume = dbClient.queryObject(Volume.class, sourceUris.get(0));
// check whether the source Volume's VPool is actually having this target Volume's varray
// specified as remote
VirtualPool sourceVPool = dbClient.queryObject(VirtualPool.class, sourceVolume.getVirtualPool());
Map<URI, VpoolRemoteCopyProtectionSettings> settings = sourceVPool.getRemoteProtectionSettings(sourceVPool, dbClient);
if (null == settings || settings.size() == 0 || !settings.containsKey(volume.getVirtualArray())) {
_logger.info("Target Volume's VArray {} is not matching already ingested source volume virtual pool's remote VArray ", volume.getVirtualArray());
return false;
}
sourceVolume.setSrdfTargets(VolumeIngestionUtil.convertUrisToStrings(targetUris));
_logger.info("Clearing internal flag for source volume {} found", sourceVolume.getNativeGuid());
sourceVolume.clearInternalFlags(INTERNAL_VOLUME_FLAGS);
_logger.debug("Set srdf target for source volume {} found", sourceVolume.getId());
modifiedVolumes.add(sourceVolume);
// source unmanagedVolume
sourceUnManagedVolume.setInactive(true);
unManagedVolumes.add(sourceUnManagedVolume);
// this target unmanaged volume
volume.setSrdfParent(new NamedURI(sourceVolume.getId(), sourceVolume.getLabel()));
_logger.debug("target volume set parent", volume.getId());
removeUnManagedVolume = true;
// handle other target volumes
List<Volume> targetVolumes = dbClient.queryObject(Volume.class, targetUris);
for (Volume targetVolume : targetVolumes) {
_logger.debug("Set parent for remaining target volume {}", targetVolume.getId());
targetVolume.setSrdfParent(new NamedURI(sourceVolume.getId(), sourceVolume.getLabel()));
targetVolume.clearInternalFlags(INTERNAL_VOLUME_FLAGS);
}
modifiedVolumes.addAll(targetVolumes);
// target unmanaged volumes
List<UnManagedVolume> targetUnManagedVolumes = dbClient.queryObject(UnManagedVolume.class, VolumeIngestionUtil.getUnManagedVolumeUris(targetUnManagedVolumeGuids, dbClient));
for (UnManagedVolume targetUnManagedVol : targetUnManagedVolumes) {
_logger.debug("Set Target unmanaged volume inactive {}", targetUnManagedVol.getId());
targetUnManagedVol.setInactive(true);
unManagedVolumes.add(targetUnManagedVol);
}
dbClient.persistObject(modifiedVolumes);
_logger.info("Target Volume successfully ingested with remote replication links", volume.getNativeGuid());
} else {
// set volume flag to false
_logger.info("Expected Targets not found for source Volume {}", sourceUnManagedVolumeId);
}
} else {
_logger.info("Targets information not found on source volume {}." + "This could happen when parallel ingests are tried or the actual volume got deleted on array.", sourceUnManagedVolumeId);
}
}
}
}
return removeUnManagedVolume;
}
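The source lookup above hinges on rewriting the unmanaged volume's native GUID into its managed-volume form and querying by alternate ID. A hedged sketch of that pattern in isolation; the helper name is hypothetical, while the constants, the constraint factory, and the (deprecated) queryByConstraint overload are the same ones used in the listing:

    // Illustrative helper (hypothetical name): map an unmanaged SRDF source GUID to the
    // URI of the already ingested Volume, or return null when it has not been ingested yet.
    private static URI findIngestedSource(String sourceUnManagedVolumeId, DbClient dbClient) {
        String sourceVolumeId = sourceUnManagedVolumeId.replace(
                VolumeIngestionUtil.UNMANAGEDVOLUME, VolumeIngestionUtil.VOLUME);
        List<URI> sourceUris = dbClient.queryByConstraint(
                AlternateIdConstraint.Factory.getVolumeNativeGuidConstraint(sourceVolumeId));
        return sourceUris.isEmpty() ? null : sourceUris.get(0);
    }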
use of com.emc.storageos.db.client.model.VpoolRemoteCopyProtectionSettings in project coprhd-controller by CoprHD.
the class PlacementManager method getRecommendationsForVirtualPool.
/**
* A call that can return multiple placement results: one for the ROOT level Vpool, and
* possibly others, for example for SRDF_COPY. The output is a map from Vpool use to the list of recommendations for that use.
* @param virtualArray - Virtual Array object
* @param project - Project object
* @param virtualPool - Virtual Pool object
* @param capabilities - VirtualPoolCapabilityValuesWrapper containing the request parameters
* @return Map of VpoolUse to List of Recommendations for that use.
*/
public Map<VpoolUse, List<Recommendation>> getRecommendationsForVirtualPool(VirtualArray virtualArray, Project project, VirtualPool virtualPool, VirtualPoolCapabilityValuesWrapper capabilities) {
Map<VpoolUse, List<Recommendation>> recommendationMap = new HashMap<VpoolUse, List<Recommendation>>();
// Invoke scheduling on the top level Virtual Pool (termed ROOT), i.e. the vpool passed in from the apisvc.
// This virtual pool may have within it other virtual pools that may need to be separately scheduled.
VpoolUse use = VpoolUse.ROOT;
Scheduler scheduler = getNextScheduler(null, virtualPool, use);
List<Recommendation> newRecommendations = scheduler.getRecommendationsForVpool(virtualArray, project, virtualPool, use, capabilities, recommendationMap);
if (newRecommendations.isEmpty()) {
return recommendationMap;
}
recommendationMap.put(use, newRecommendations);
// Loop over the SRDF Copies, invoking a scheduler on them.
if (VirtualPool.vPoolSpecifiesSRDF(virtualPool)) {
Map<URI, VpoolRemoteCopyProtectionSettings> remoteCopyMap = VirtualPool.getRemoteProtectionSettings(virtualPool, dbClient);
for (Map.Entry<URI, VpoolRemoteCopyProtectionSettings> entry : remoteCopyMap.entrySet()) {
// Invoke scheduler on SRDF copies
use = VpoolUse.SRDF_COPY;
URI vArrayURI = entry.getValue().getVirtualArray();
VirtualArray vArray = dbClient.queryObject(VirtualArray.class, vArrayURI);
URI vPoolURI = entry.getValue().getVirtualPool();
VirtualPool vPool = dbClient.queryObject(VirtualPool.class, vPoolURI);
scheduler = getNextScheduler(null, vPool, use);
newRecommendations = scheduler.getRecommendationsForVpool(vArray, project, vPool, use, capabilities, recommendationMap);
if (recommendationMap.containsKey(use)) {
recommendationMap.get(use).addAll(newRecommendations);
} else {
recommendationMap.put(use, newRecommendations);
}
}
}
logRecommendations(recommendationMap);
return recommendationMap;
}
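A hedged usage sketch of the map returned above, assuming placementManager, varray, project, vpool and capabilities are already in scope; an empty map (no ROOT entry) means the source volumes could not be placed at all:

    // Consume the per-use recommendation map produced by getRecommendationsForVirtualPool.
    Map<VpoolUse, List<Recommendation>> recs =
            placementManager.getRecommendationsForVirtualPool(varray, project, vpool, capabilities);
    List<Recommendation> rootRecs = recs.get(VpoolUse.ROOT);
    // SRDF_COPY entries, when present, carry the recommendations for the vpool's remote-copy varrays.
    List<Recommendation> srdfCopyRecs = recs.containsKey(VpoolUse.SRDF_COPY)
            ? recs.get(VpoolUse.SRDF_COPY) : Collections.<Recommendation>emptyList();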
use of com.emc.storageos.db.client.model.VpoolRemoteCopyProtectionSettings in project coprhd-controller by CoprHD.
the class SRDFScheduler method filterPoolsForSupportedActiveModeProvider.
private List<StoragePool> filterPoolsForSupportedActiveModeProvider(List<StoragePool> candidatePools, VirtualPool vpool) {
Map<URI, VpoolRemoteCopyProtectionSettings> remoteProtectionSettings = vpool.getRemoteProtectionSettings(vpool, _dbClient);
if (remoteProtectionSettings != null) {
for (URI varrayURI : remoteProtectionSettings.keySet()) {
VpoolRemoteCopyProtectionSettings remoteCopyProtectionSettings = remoteProtectionSettings.get(varrayURI);
String copyMode = remoteCopyProtectionSettings.getCopyMode();
if (SupportedCopyModes.ACTIVE.toString().equals(copyMode)) {
SRDFMetroMatcher srdfMetroMatcher = new SRDFMetroMatcher();
srdfMetroMatcher.setCoordinatorClient(_coordinator);
srdfMetroMatcher.setObjectCache(new ObjectLocalCache(_dbClient, false));
return srdfMetroMatcher.filterPoolsForSRDFActiveMode(candidatePools);
}
}
}
return candidatePools;
}
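The filter above only kicks in when some remote copy setting of the vpool requests ACTIVE (SRDF/Metro) mode. A minimal sketch of that precondition as a standalone check, using the same accessors as the method above; the helper name is hypothetical:

    // Illustrative helper (hypothetical name): true when any remote copy setting of the
    // vpool is configured for ACTIVE (SRDF/Metro) copy mode.
    private boolean vpoolRequestsActiveCopyMode(VirtualPool vpool, DbClient dbClient) {
        Map<URI, VpoolRemoteCopyProtectionSettings> settings =
                VirtualPool.getRemoteProtectionSettings(vpool, dbClient);
        if (settings == null) {
            return false;
        }
        for (VpoolRemoteCopyProtectionSettings setting : settings.values()) {
            if (SupportedCopyModes.ACTIVE.toString().equals(setting.getCopyMode())) {
                return true;
            }
        }
        return false;
    }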
use of com.emc.storageos.db.client.model.VpoolRemoteCopyProtectionSettings in project coprhd-controller by CoprHD.
the class SRDFScheduler method scheduleStorageSourcePoolConstraint.
/**
* Schedule storage based on the incoming storage pools for source volumes. Find a source
* storage pool that can provide a source volume that satisfies the vpool's criteria for all
* targets varrays required and build a recommendation structure to describe the findings.
*
* Strategy:
*
* 0. When we come into this method, we already have a list of candidate source pools, which may span multiple arrays.
* 1. Get matching pools for each of the target virtual arrays based on the target virtual pool.
* 2. Make a map of virtual array to the potential pools found in step 1.
* 3. From each entry in the map, find a pool that belongs to a storage system connected via SRDF (with the same policy) to the specific candidate pool we're looking at.
* 4. Generate an SRDF Recommendation object that reflects the combination we found.
*
* @param varray
* varray requested for source
* @param project
* project the volumes belong to
* @param vpool
* vpool requested
* @param capabilities
* parameters
* @param candidatePools
* candidate pools to use for source
* @param vpoolChangeVolume
* vpool change volume, if applicable
* @param consistencyGroupUri
* consistency group URI, if applicable
* @return list of Recommendation objects to satisfy the request
*/
private List<Recommendation> scheduleStorageSourcePoolConstraint(final VirtualArray varray, final Project project, final VirtualPool vpool, final VirtualPoolCapabilityValuesWrapper capabilities, final List<StoragePool> candidatePools, final Volume vpoolChangeVolume, final URI consistencyGroupUri) {
// Initialize a list of recommendations to be returned.
List<Recommendation> recommendations = new ArrayList<Recommendation>();
if (capabilities.getResourceCount() == 1) {
// For single resource request, select storage pool randomly from all candidate pools
// (to minimize collisions).
Collections.shuffle(candidatePools);
} else {
// Sort all pools in descending order by free capacity (first order) and in ascending
// order by ratio
// of pool's subscribed capacity to total capacity(suborder). This order is kept through
// the selection procedure.
_blockScheduler.sortPools(candidatePools);
}
List<VirtualArray> targetVarrays = getTargetVirtualArraysForVirtualPool(project, vpool, _dbClient, _permissionsHelper);
// Attempt to use these pools for selection based on target
StringBuffer sb = new StringBuffer("Determining if SRDF is possible from " + varray.getId() + " to: ");
for (VirtualArray targetVarray : targetVarrays) {
sb.append(targetVarray.getId()).append(" ");
}
_log.info(sb.toString());
// The port group provided belongs to the SRDF source storage system.
// If a port group is set in capabilities, ViPR looks for storage pools only from that port group's storage system.
// We need to remove the PORT_GROUP entry from capabilities for the SRDF target volume,
// so that ViPR picks SRDF target storage pools from the right storage system.
capabilities.removeCapabilityEntry(VirtualPoolCapabilityValuesWrapper.PORT_GROUP);
Map<String, Object> attributeMap = new HashMap<String, Object>();
Map<VirtualArray, List<StoragePool>> varrayPoolMap = getMatchingPools(targetVarrays, vpool, capabilities, attributeMap);
if (varrayPoolMap == null || varrayPoolMap.isEmpty()) {
// No matching storage pools found for any of the target varrays. There are no target
// storage pools that match the passed vpool parameters and protocols and/or there are
// no pools that have enough
// capacity to hold at least one resource of the requested size.
Set<String> tmpTargetVarrays = new HashSet<String>();
sb = new StringBuffer("No matching storage pools found for any of the target varrays: [ ");
for (VirtualArray targetVarray : targetVarrays) {
sb.append(targetVarray.getId()).append(" ");
tmpTargetVarrays.add(targetVarray.getLabel());
}
sb.append("]. There are no storage pools that match the passed vpool parameters and protocols and/or " + "there are no pools that have enough capacity to hold at least one resource of the requested size.");
StringBuffer errorMessage = new StringBuffer();
if (attributeMap.get(AttributeMatcher.ERROR_MESSAGE) != null) {
errorMessage = (StringBuffer) attributeMap.get(AttributeMatcher.ERROR_MESSAGE);
}
_log.error(sb.toString());
throw APIException.badRequests.noMatchingRecoverPointStoragePoolsForVpoolAndVarrays(vpool.getLabel(), tmpTargetVarrays, errorMessage.toString());
}
// Reduce the source and target pool down to the pools available via target.
Set<SRDFPoolMapping> tmpDestPoolsList = getSRDFPoolMappings(varray, candidatePools, varrayPoolMap, vpool, vpoolChangeVolume, capabilities.getSize());
if (tmpDestPoolsList == null || tmpDestPoolsList.isEmpty()) {
// There are no target pools from any of the target varrays that share the
// same SRDF connectivity as any of the source varray pools. Placement cannot
// be achieved.
Set<String> tmpSRDFVarrays = new HashSet<String>();
sb = new StringBuffer("No matching target pool found for varray: ");
sb.append(varray.getId());
sb.append(" and vpool: ");
sb.append(vpool.getId());
sb.append(" to varrays: ");
for (VirtualArray targetVarray : targetVarrays) {
sb.append(targetVarray.getId()).append(" ");
tmpSRDFVarrays.add(targetVarray.getLabel());
}
// No matching target pool found for varray so throw an exception
// indicating a placement error.
_log.error(sb.toString());
throw APIException.badRequests.noMatchingSRDFPools(varray.getLabel(), vpool.getLabel(), tmpSRDFVarrays);
}
// Fire business rules to determine which SRDFPoolMappings can be eliminated
// from consideration for placement.
Set<SRDFPoolMapping> srcDestPoolsList = fireSRDFPlacementRules(tmpDestPoolsList, capabilities.getResourceCount());
// If the business rules eliminated every source/destination pool mapping for this volume request configuration, throw an exception.
if (srcDestPoolsList == null || srcDestPoolsList.isEmpty()) {
throw APIException.badRequests.srdfNoSolutionsFoundError();
}
// Get a new source pool list for pool selection
Set<StoragePool> sourceCandidatePoolList = new HashSet<StoragePool>();
for (SRDFPoolMapping srdfPoolMapping : srcDestPoolsList) {
sourceCandidatePoolList.add(srdfPoolMapping.sourceStoragePool);
}
// Try with the storagePoolList as it currently is.
// If we get through the process and couldn't achieve full target, we should
// take out the matched pool from the storagePoolList and try again.
List<StoragePool> sourcePoolList = new ArrayList<StoragePool>();
sourcePoolList.addAll(sourceCandidatePoolList);
// We need to create recommendations for one or more pools
// that can accommodate the number of requested resources.
// We start by trying to place all resources in a single
// pool if one exists that can accommodate all requested
// resources and work our way down as necessary trying to
// minimize the number of pools used to satisfy the request.
int recommendedCount = 0;
int currentCount = capabilities.getResourceCount();
// Keep going until we run out of source pools or have satisfied all of the requested resources.
while (!sourcePoolList.isEmpty() && recommendedCount < capabilities.getResourceCount()) {
// This request will either decrement the count OR shrink the sourcePoolList
// In the case of decrementing the count, it's because it was successful at
// placing volume(s). If it wasn't, that source pool goes in the trash and we
// try the next one.
long resourceSize = capabilities.getSize();
int resourceCount = capabilities.getResourceCount();
// We need to find a pool that matches the capacity for all the source/target luns
long requiredPoolCapacity = resourceSize * currentCount;
_log.info("Required pool capacity: " + requiredPoolCapacity);
StoragePool poolWithRequiredCapacity = _blockScheduler.getPoolMatchingCapacity(requiredPoolCapacity, resourceSize, currentCount, sourcePoolList, VirtualPool.ProvisioningType.Thin.toString().equalsIgnoreCase(vpool.getSupportedProvisioningType()), null);
// If we found a pool with the required capacity, create a recommendation for that pool, setting the resource count for that recommendation.
if (poolWithRequiredCapacity != null) {
StoragePool recommendedPool = poolWithRequiredCapacity;
_log.debug("Recommending storage pool {} for {} resources.", recommendedPool.getId(), currentCount);
// Now we know what pool was selected, we can grab the target pools that jive with that
// source
Map<VirtualArray, List<StoragePool>> targetVarrayPoolMap = findDestPoolsForSourcePool(targetVarrays, srcDestPoolsList, recommendedPool, vpool);
if (targetVarrayPoolMap == null || targetVarrayPoolMap.isEmpty()) {
// A valid source pool was found but there are no pools from any of the
// target varrays that can protect it.
_log.info("There are no pools from any of the target varrays that can protect the source " + "varray pool {}. Will try using another source varray pool.", recommendedPool.getLabel());
// Remove the source pool and try the next one.
sourcePoolList.remove(poolWithRequiredCapacity);
} else {
// A single recommendation object will create a set of volumes for an SRDF pair.
SRDFRecommendation rec = new SRDFRecommendation();
// For each target varray, we start the process of matching source and destination
// pools to one storage system.
Map<VirtualArray, Set<StorageSystem>> varrayTargetDeviceMap = new HashMap<VirtualArray, Set<StorageSystem>>();
for (VirtualArray targetVarray1 : targetVarrayPoolMap.keySet()) {
if (rec.getSourceStoragePool() == null) {
rec.setVirtualArray(varray.getId());
rec.setVirtualPool(vpool);
rec.setSourceStoragePool(recommendedPool.getId());
rec.setResourceCount(currentCount);
rec.setSourceStorageSystem(recommendedPool.getStorageDevice());
rec.setVirtualArrayTargetMap(new HashMap<URI, Target>());
rec.setVpoolChangeVolume(vpoolChangeVolume != null ? vpoolChangeVolume.getId() : null);
rec.setVpoolChangeVpool(vpoolChangeVolume != null ? vpool.getId() : null);
}
if (targetVarrayPoolMap.get(targetVarray1) == null || targetVarrayPoolMap.get(targetVarray1).isEmpty()) {
_log.error("Could not find any suitable storage pool for target varray: " + targetVarray1.getLabel());
throw APIException.badRequests.unableToFindSuitablePoolForTargetVArray(targetVarray1.getLabel());
}
// Select the destination pool based on what was selected as source
StoragePool destinationPool = _blockScheduler.selectPool(targetVarrayPoolMap.get(targetVarray1));
_log.info("Destination target for varray " + targetVarray1.getLabel() + " was determined to be in pool: " + destinationPool.getLabel());
Target target = new Target();
target.setTargetPool(destinationPool.getId());
target.setTargetStorageDevice(destinationPool.getStorageDevice());
// Set the copy mode
Map<URI, VpoolRemoteCopyProtectionSettings> settingsMap = VirtualPool.getRemoteProtectionSettings(vpool, _dbClient);
target.setCopyMode(settingsMap.get(targetVarray1.getId()).getCopyMode());
if (target.getCopyMode() == null) {
// Set the default if not set
target.setCopyMode(RemoteDirectorGroup.SupportedCopyModes.ASYNCHRONOUS.toString());
}
// Generate a list of storage systems that match the src and dest pools lists.
Set<StorageSystem> targetDeviceList = findMatchingSRDFPools(targetVarray1, srcDestPoolsList, recommendedPool, destinationPool);
if (targetDeviceList.isEmpty()) {
_log.error("Could not find a Storage pool for target varray: " + targetVarray1.getLabel());
throw APIException.badRequests.unableToFindSuitablePoolForTargetVArray(targetVarray1.getLabel());
}
rec.getVirtualArrayTargetMap().put(targetVarray1.getId(), target);
// Add this potential solution to the map.
varrayTargetDeviceMap.put(targetVarray1, targetDeviceList);
}
// Grab any element since all varrays need to have the same SRDF connectivity.
VirtualArray firstVarray = null;
for (VirtualArray baseVarray : varrayTargetDeviceMap.keySet()) {
firstVarray = baseVarray;
break;
}
_log.info("Chose the first varray for SRDF comparison: " + firstVarray.getLabel());
// Now go through each storage system in this varray and see if it matches up
findInsertRecommendation(rec, firstVarray, recommendations, candidatePools, recommendedPool, varrayTargetDeviceMap, project, consistencyGroupUri);
// Update the count of resources for which we have created
// a recommendation.
recommendedCount += currentCount;
// Update the current count. The conditional prevents
// unnecessary attempts to look for pools of a given
// free capacity that we already know don't exist. For
// example, say we want 100 resources and the first pool
// we find that can hold multiple resources can hold only
// 10. We don't want to continue looking for pools that
// can hold 90,89,88,...11 resources. We just want to
// see if there is another pool that can hold 10 resources,
// then 9,8, and so on.
currentCount = resourceCount - recommendedCount < currentCount ? resourceCount - recommendedCount : currentCount;
}
} else {
// If we can't find a pool that can hold the current
// count of resources, decrease the count so that we look
// for pools that can hold the next smaller number.
currentCount--;
// Clear out the source pool list (which will cause failure)
sourcePoolList.clear();
}
// If we have not yet satisfied the full request, log an error, clear the recommendations, remove the chosen pool, and try again with the remaining pools.
if (recommendedCount != resourceCount) {
_log.error("Could not find matching pools for varray {} & vpool {}", varray.getId(), vpool.getId());
recommendations.clear();
// Remove the pool we chose from the list so we can try again.
sourcePoolList.remove(poolWithRequiredCapacity);
}
}
return recommendations;
}
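The per-target copy-mode handling in the middle of the method reduces to a lookup in the vpool's remote settings with an ASYNCHRONOUS fallback. A hedged sketch of just that step, assuming vpool, targetVarray, target and _dbClient are in scope as they are above:

    // Resolve the SRDF copy mode for one target varray, defaulting to ASYNCHRONOUS
    // when the vpool's remote settings do not specify one (mirrors the logic above).
    Map<URI, VpoolRemoteCopyProtectionSettings> settingsMap =
            VirtualPool.getRemoteProtectionSettings(vpool, _dbClient);
    VpoolRemoteCopyProtectionSettings targetSettings = settingsMap.get(targetVarray.getId());
    String copyMode = (targetSettings != null && targetSettings.getCopyMode() != null)
            ? targetSettings.getCopyMode()
            : RemoteDirectorGroup.SupportedCopyModes.ASYNCHRONOUS.toString();
    target.setCopyMode(copyMode);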