use of com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper in project coprhd-controller by CoprHD.
the class BlockService method startMirrors.
/**
* Start the specified mirror(s) for the source volume
*
* @param id
* the URN of a ViPR Source volume
* @param copy
* copyID Copy volume ID; if none is specified, all copies are started
*
* @return TaskList
*/
private TaskList startMirrors(URI id, NativeContinuousCopyCreate copy) {
String taskId = UUID.randomUUID().toString();
int count = 1;
if (copy.getCount() != null) {
count = copy.getCount();
}
Volume sourceVolume = queryVolumeResource(id);
// Don't operate on VPLEX backend or RP Journal volumes.
BlockServiceUtils.validateNotAnInternalBlockObject(sourceVolume, false);
// Make sure that we don't have some pending
// operation against the volume
checkForPendingTasks(Arrays.asList(sourceVolume.getTenant().getURI()), Arrays.asList(sourceVolume));
if (count <= 0) {
throw APIException.badRequests.invalidParameterRangeLessThanMinimum("count", count, 1);
}
ArgValidator.checkEntity(sourceVolume, id, isIdEmbeddedInURL(id));
validateContinuousCopyName(copy.getName(), count, sourceVolume);
StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, sourceVolume.getStorageController());
VirtualPool sourceVPool = _dbClient.queryObject(VirtualPool.class, sourceVolume.getVirtualPool());
validateMirrorCount(sourceVolume, sourceVPool, count);
// validate VMAX3 source volume for active snap sessions.
if (storageSystem != null && storageSystem.checkIfVmax3()) {
BlockServiceUtils.validateVMAX3ActiveSnapSessionsExists(sourceVolume.getId(), _dbClient, MIRRORS);
}
VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, count);
capabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, sourceVolume.getCapacity());
capabilities.put(VirtualPoolCapabilityValuesWrapper.THIN_PROVISIONING, sourceVolume.getThinlyProvisioned());
capabilities.put(VirtualPoolCapabilityValuesWrapper.THIN_VOLUME_PRE_ALLOCATE_SIZE, sourceVolume.getThinVolumePreAllocationSize());
BlockServiceApi serviceApi = null;
if ((storageSystem != null) && (DiscoveredDataObject.Type.vplex.name().equals(storageSystem.getSystemType()))) {
serviceApi = getBlockServiceImpl(storageSystem.getSystemType());
} else {
serviceApi = getBlockServiceImpl("mirror");
}
return serviceApi.startNativeContinuousCopies(storageSystem, sourceVolume, sourceVPool, capabilities, copy, taskId);
}
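Stripped of the validation, the wrapper usage in startMirrors reduces to copying a few attributes of the source volume into the capabilities map before dispatching to the mirror or VPLEX service implementation. A minimal sketch of that pattern, using a hypothetical helper class (not part of BlockService) and assuming sourceVolume and count have already been validated as above:
import com.emc.storageos.db.client.model.Volume;
import com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper;

// Hypothetical helper; it mirrors the capability setup shown in startMirrors above.
final class MirrorCapabilityExample {
    static VirtualPoolCapabilityValuesWrapper forMirrorStart(Volume sourceVolume, int count) {
        VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
        // Number of mirrors to start and the size they inherit from the source volume.
        capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, count);
        capabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, sourceVolume.getCapacity());
        // Carry over the source volume's thin-provisioning settings.
        capabilities.put(VirtualPoolCapabilityValuesWrapper.THIN_PROVISIONING, sourceVolume.getThinlyProvisioned());
        capabilities.put(VirtualPoolCapabilityValuesWrapper.THIN_VOLUME_PRE_ALLOCATE_SIZE, sourceVolume.getThinVolumePreAllocationSize());
        return capabilities;
    }
}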
use of com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper in project coprhd-controller by CoprHD.
the class BlockService method createVolume.
/**
* The fundamental abstraction in the Block Store is a
* volume. A volume is a unit of block storage capacity that has been
* allocated by a consumer to a project. This API allows the user to
* create one or more volumes. The volumes are created in the same
* storage pool.
*
* NOTE: This is an asynchronous operation.
*
* @prereq none
*
* @param param
* POST data containing the volume creation information.
*
* @brief Create volume
* @return A reference to a BlockTaskList containing a list of
* TaskResourceRep references specifying the task data for the
* volume creation tasks.
* @throws InternalException
*/
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
public TaskList createVolume(VolumeCreate param) throws InternalException {
ArgValidator.checkFieldNotNull(param, "volume_create");
// CQECC00604134
ArgValidator.checkFieldUriType(param.getProject(), Project.class, "project");
// Get and validate the project.
Project project = _permissionsHelper.getObjectById(param.getProject(), Project.class);
ArgValidator.checkEntity(project, param.getProject(), isIdEmbeddedInURL(param.getProject()));
// Verify the user is authorized.
BlockServiceUtils.verifyUserIsAuthorizedForRequest(project, getUserFromContext(), _permissionsHelper);
// Get and validate the varray
ArgValidator.checkFieldUriType(param.getVarray(), VirtualArray.class, "varray");
VirtualArray varray = BlockServiceUtils.verifyVirtualArrayForRequest(project, param.getVarray(), uriInfo, _permissionsHelper, _dbClient);
ArgValidator.checkEntity(varray, param.getVarray(), isIdEmbeddedInURL(param.getVarray()));
// Get and validate the VirtualPool.
VirtualPool vpool = getVirtualPoolForVolumeCreateRequest(project, param);
VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
// Get the count indicating the number of volumes to create. If not passed, assume 1. Then get the volume placement recommendations.
Integer volumeCount = 1;
Long volumeSize = 0L;
if (param.getCount() != null) {
if (param.getCount() <= 0) {
throw APIException.badRequests.parameterMustBeGreaterThan("count", 0);
}
if (param.getCount() > MAX_VOLUME_COUNT) {
throw APIException.badRequests.exceedingLimit("count", MAX_VOLUME_COUNT);
}
volumeCount = param.getCount();
capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, volumeCount);
}
if (param.getSize() != null) {
// Validate the requested volume size is greater than 0.
volumeSize = SizeUtil.translateSize(param.getSize());
if (volumeSize <= 0) {
throw APIException.badRequests.parameterMustBeGreaterThan(SIZE, 0);
}
capabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, volumeSize);
}
if (null != vpool.getThinVolumePreAllocationPercentage() && 0 < vpool.getThinVolumePreAllocationPercentage()) {
capabilities.put(VirtualPoolCapabilityValuesWrapper.THIN_VOLUME_PRE_ALLOCATE_SIZE, VirtualPoolUtil.getThinVolumePreAllocationSize(vpool.getThinVolumePreAllocationPercentage(), volumeSize));
}
if (VirtualPool.ProvisioningType.Thin.toString().equalsIgnoreCase(vpool.getSupportedProvisioningType())) {
capabilities.put(VirtualPoolCapabilityValuesWrapper.THIN_PROVISIONING, Boolean.TRUE);
}
// Check whether the vpool supports dedup
if (null != vpool.getDedupCapable() && vpool.getDedupCapable()) {
capabilities.put(VirtualPoolCapabilityValuesWrapper.DEDUP, Boolean.TRUE);
}
// Validate the port group
URI portGroupURI = param.getPortGroup();
if (!NullColumnValueGetter.isNullURI(portGroupURI)) {
ArgValidator.checkFieldUriType(portGroupURI, StoragePortGroup.class, "portGroup");
StoragePortGroup portGroup = _dbClient.queryObject(StoragePortGroup.class, portGroupURI);
if (portGroup == null || !RegistrationStatus.REGISTERED.name().equalsIgnoreCase(portGroup.getRegistrationStatus())) {
throw APIException.internalServerErrors.invalidObject(portGroupURI.toString());
}
// check if port group's storage system is associated to the requested virtual array
ExportUtils.validatePortGroupWithVirtualArray(portGroup, varray.getId(), _dbClient);
capabilities.put(VirtualPoolCapabilityValuesWrapper.PORT_GROUP, portGroupURI);
}
// Find the implementation that services this vpool and volume request
BlockServiceApi blockServiceImpl = getBlockServiceImpl(vpool, _dbClient);
BlockConsistencyGroup consistencyGroup = null;
final Boolean isMultiVolumeConsistencyOn = vpool.getMultivolumeConsistency() == null ? FALSE : vpool.getMultivolumeConsistency();
/*
* Validate Consistency Group:
* 1. CG should be active in the database
* 2. CG project and Volume project should match
* 3. The storage system that the CG is bonded to is associated to the
* request virtual array
*/
ArrayList<String> requestedTypes = new ArrayList<String>();
final URI actualId = project.getId();
if (param.getConsistencyGroup() != null) {
// Get and validate consistency group
consistencyGroup = queryConsistencyGroup(param.getConsistencyGroup());
// Check that the Volume project and the CG project are the same
final URI expectedId = consistencyGroup.getProject().getURI();
checkProjectsMatch(expectedId, actualId);
// If a consistency group is specified, the vpool's multi-volume consistency attribute must be enabled
if (!isMultiVolumeConsistencyOn) {
throw APIException.badRequests.invalidParameterConsistencyGroupProvidedButVirtualPoolHasNoMultiVolumeConsistency(param.getConsistencyGroup(), param.getVpool());
}
// Find all volumes assigned to the group
final List<Volume> activeCGVolumes = blockServiceImpl.getActiveCGVolumes(consistencyGroup);
// Validate that the number of volumes in the group plus the number
// to be added by this request does not exceed the maximum volumes
// in a CG.
int cgMaxVolCount = blockServiceImpl.getMaxVolumesForConsistencyGroup(consistencyGroup);
if ((activeCGVolumes.size() + volumeCount.intValue()) > cgMaxVolCount) {
throw APIException.badRequests.requestedVolumeCountExceedsLimitsForCG(volumeCount.intValue(), cgMaxVolCount, consistencyGroup.getLabel());
}
// Get the requested types for provisioning (RP, VPlex, etc.)
requestedTypes = getRequestedTypes(vpool);
// If the consistency group is not yet created, verify the name is OK.
if (!consistencyGroup.created()) {
blockServiceImpl.validateConsistencyGroupName(consistencyGroup, requestedTypes);
}
// Consistency Group is already a Target, hence cannot be used to create source volume
if (consistencyGroup.srdfTarget()) {
throw APIException.badRequests.consistencyGroupBelongsToTarget(consistencyGroup.getId());
}
if (VirtualPool.vPoolSpecifiesSRDF(vpool) && (consistencyGroup.getLabel().length() > 8 || !isAlphaNumeric(consistencyGroup.getLabel()))) {
throw APIException.badRequests.groupNameCannotExceedEightCharactersoronlyAlphaNumericAllowed();
}
if (!VirtualPool.vPoolSpecifiesSRDF(vpool) && consistencyGroup.checkForType(Types.SRDF)) {
throw APIException.badRequests.nonSRDFVolumeCannotbeAddedToSRDFCG();
}
if (VirtualPool.vPoolSpecifiesSRDF(vpool)) {
List<Volume> nativeVolumesInCG = BlockConsistencyGroupUtils.getActiveNativeVolumesInCG(consistencyGroup, _dbClient);
for (Volume nativeVolume : nativeVolumesInCG) {
// Cannot add volumes if in swapped state. This is a limitation that will eventually be removed.
if (Volume.LinkStatus.SWAPPED.name().equals(nativeVolume.getLinkStatus())) {
throw BadRequestException.badRequests.cannotAddVolumesToSwappedCG(consistencyGroup.getLabel());
}
}
}
// check if CG's storage system is associated to the requested virtual array
validateCGValidWithVirtualArray(consistencyGroup, varray);
// If creation of the CG has already been initiated, verify that the requested types are compatible with the CG's previously requested types.
if (consistencyGroup.creationInitiated()) {
if (!consistencyGroup.getRequestedTypes().containsAll(requestedTypes)) {
throw APIException.badRequests.consistencyGroupIsNotCompatibleWithRequest(consistencyGroup.getId(), consistencyGroup.getRequestedTypes().toString(), requestedTypes.toString());
}
}
Volume existingRpSourceVolume = null;
// RP consistency group validation
if (VirtualPool.vPoolSpecifiesProtection(vpool)) {
// Check to see if the CG has any RecoverPoint provisioned volumes. This is done by looking at the protectionSet field.
// The protectionSet field won't be set until the volume is provisioned in RP so this check allows concurrent
// requests to go through.
boolean cgHasRpProvisionedVolumes = false;
if (activeCGVolumes != null && !activeCGVolumes.isEmpty()) {
for (Volume vol : activeCGVolumes) {
if (!NullColumnValueGetter.isNullNamedURI(vol.getProtectionSet())) {
_log.info(String.format("Determined that consistency group %s contains RP provisioned volumes.", consistencyGroup.getId()));
cgHasRpProvisionedVolumes = true;
break;
}
}
}
// Ensure the CG is either empty or has been tagged for RP and contains properly provisioned RP volumes.
if (cgHasRpProvisionedVolumes && !consistencyGroup.getTypes().contains(BlockConsistencyGroup.Types.RP.toString())) {
throw APIException.badRequests.consistencyGroupMustBeEmptyOrContainRpVolumes(consistencyGroup.getId());
}
if (!activeCGVolumes.isEmpty()) {
// Find the first existing source volume for source/target varray comparison.
for (Volume cgVolume : activeCGVolumes) {
if (cgVolume.getPersonality() != null && cgVolume.getPersonality().equals(Volume.PersonalityTypes.SOURCE.toString())) {
existingRpSourceVolume = cgVolume;
break;
}
}
if (existingRpSourceVolume != null) {
VirtualPool existingVpool = _dbClient.queryObject(VirtualPool.class, existingRpSourceVolume.getVirtualPool());
VirtualPool requestedVpool = _dbClient.queryObject(VirtualPool.class, param.getVpool());
// The source virtual arrays must match
if (existingVpool.getVirtualArrays().size() != requestedVpool.getVirtualArrays().size() || !existingVpool.getVirtualArrays().containsAll(requestedVpool.getVirtualArrays())) {
// The source virtual arrays are not compatible with the CG
throw APIException.badRequests.vPoolSourceVarraysNotCompatibleForCG(consistencyGroup.getLabel());
}
// Ensure we are not mixing MetroPoint and non-MetroPoint volumes in the same CG.
if (VirtualPool.vPoolSpecifiesHighAvailability(existingVpool) && VirtualPool.vPoolSpecifiesHighAvailability(requestedVpool)) {
// Both vpools specify high availability, so verify that we are not trying to mix MetroPoint volumes with Metro volumes.
if ((!VirtualPool.vPoolSpecifiesMetroPoint(requestedVpool) && VirtualPool.vPoolSpecifiesMetroPoint(existingVpool)) || (VirtualPool.vPoolSpecifiesMetroPoint(requestedVpool) && !VirtualPool.vPoolSpecifiesMetroPoint(existingVpool))) {
throw APIException.badRequests.cannotMixMetroPointAndNonMetroPointVolumes(consistencyGroup.getLabel());
}
}
// Check the target virtual arrays
StringMap existingProtectionVarraySettings = existingVpool.getProtectionVarraySettings();
if (existingProtectionVarraySettings == null) {
// NOTE: This will be supported in the future through Jira CTRL-10129
throw APIException.badRequests.cannotAddVolumesToSwappedCG(consistencyGroup.getLabel());
}
StringMap requestedProtectionVarraySettings = requestedVpool.getProtectionVarraySettings();
if (existingProtectionVarraySettings.size() != requestedProtectionVarraySettings.size()) {
// The target virtual arrays are not compatible with the CG
throw APIException.badRequests.vPoolTargetVarraysNotCompatibleForCG(consistencyGroup.getLabel());
}
for (String targetVarray : requestedProtectionVarraySettings.keySet()) {
if (!existingProtectionVarraySettings.containsKey(targetVarray)) {
// The target virtual arrays are not compatible with the CG
throw APIException.badRequests.vPoolTargetVarraysNotCompatibleForCG(consistencyGroup.getLabel());
}
}
// Ensure the replication mode is logically equivalent
String requestedRpCopyMode = NullColumnValueGetter.isNullValue(requestedVpool.getRpCopyMode()) ? RPCopyMode.ASYNCHRONOUS.name() : requestedVpool.getRpCopyMode();
String existingRpCopyMode = NullColumnValueGetter.isNullValue(existingVpool.getRpCopyMode()) ? RPCopyMode.ASYNCHRONOUS.name() : existingVpool.getRpCopyMode();
if (!requestedRpCopyMode.equalsIgnoreCase(existingRpCopyMode)) {
throw APIException.badRequests.vPoolRPCopyModeNotCompatibleForCG(consistencyGroup.getLabel());
}
}
}
}
// Creating new volumes in the CG is not allowed if it has snapshots, mirrors, or full copies that are still attached to their source volumes.
if (!activeCGVolumes.isEmpty()) {
// Pass in an active CG volume for validation. If we are dealing with a RecoverPoint
// consistency group, we need to use an RP source volume. Otherwise we can use any arbitrary
// CG volume.
Volume activeCGVolume = existingRpSourceVolume == null ? activeCGVolumes.get(0) : existingRpSourceVolume;
if (!BlockServiceUtils.checkCGVolumeCanBeAddedOrRemoved(consistencyGroup, activeCGVolume, _dbClient)) {
checkCGForMirrors(consistencyGroup, activeCGVolumes);
checkCGForSnapshots(consistencyGroup);
getFullCopyManager().verifyNewVolumesCanBeCreatedInConsistencyGroup(consistencyGroup, activeCGVolumes);
}
}
capabilities.put(VirtualPoolCapabilityValuesWrapper.BLOCK_CONSISTENCY_GROUP, param.getConsistencyGroup());
} else if (VirtualPool.vPoolSpecifiesProtection(vpool)) {
// If RP protection is specified, a consistency group must be selected.
throw APIException.badRequests.consistencyGroupMissingForRpProtection();
}
// verify quota
long size = volumeCount * SizeUtil.translateSize(param.getSize());
TenantOrg tenant = _dbClient.queryObject(TenantOrg.class, project.getTenantOrg().getURI());
ArgValidator.checkEntity(tenant, project.getTenantOrg().getURI(), false);
CapacityUtils.validateQuotasForProvisioning(_dbClient, vpool, project, tenant, size, "volume");
// set compute param
URI computeURI = param.getComputeResource();
if (!NullColumnValueGetter.isNullURI(computeURI)) {
capabilities.put(VirtualPoolCapabilityValuesWrapper.COMPUTE, computeURI.toString());
}
// COP-14028
// Changing the return of a TaskList to return immediately while the underlying tasks are
// being built up. Steps:
// 1. Create a task object ahead of time and persist it for each requested volume.
// 2. Fire off a thread that does the placement and preparation of the volumes, which will use the pre-created
// task/volume objects during their source volume creations.
// 3. Return to the caller the new Task objects that are in the pending state.
String task = UUID.randomUUID().toString();
TaskList taskList = createVolumeTaskList(param.getSize(), project, varray, vpool, param.getName(), task, volumeCount);
// Record the audit operation here; running it inside the scheduling thread causes exceptions.
auditOp(OperationTypeEnum.CREATE_BLOCK_VOLUME, true, AuditLogManager.AUDITOP_BEGIN, param.getName(), volumeCount, varray.getId().toString(), actualId.toString());
// call thread that does the work.
CreateVolumeSchedulingThread.executeApiTask(this, _asyncTaskService.getExecutorService(), _dbClient, varray, project, vpool, capabilities, taskList, task, consistencyGroup, requestedTypes, param, blockServiceImpl);
_log.info("Kicked off thread to perform placement and scheduling. Returning " + taskList.getTaskList().size() + " tasks");
return taskList;
}
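The wrapper built in createVolume is assembled incrementally from the request payload and the virtual pool, so most keys are set only when the corresponding option applies. A condensed, hypothetical sketch of that conditional population (the class is not part of CoprHD, and the parameters stand in for values derived from the VolumeCreate payload and the vpool):
import java.net.URI;
import com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper;

// Hypothetical illustration of the request-driven capability setup performed in createVolume.
final class VolumeCreateCapabilityExample {
    static VirtualPoolCapabilityValuesWrapper forCreate(int volumeCount, long volumeSize, boolean thin,
            boolean dedupCapable, URI portGroupURI, URI consistencyGroupURI) {
        VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
        capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, volumeCount);
        capabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, volumeSize);
        if (thin) {
            // Set only when the vpool's provisioning type is Thin.
            capabilities.put(VirtualPoolCapabilityValuesWrapper.THIN_PROVISIONING, Boolean.TRUE);
        }
        if (dedupCapable) {
            capabilities.put(VirtualPoolCapabilityValuesWrapper.DEDUP, Boolean.TRUE);
        }
        if (portGroupURI != null) {
            // In createVolume the port group is validated against the varray before being added.
            capabilities.put(VirtualPoolCapabilityValuesWrapper.PORT_GROUP, portGroupURI);
        }
        if (consistencyGroupURI != null) {
            capabilities.put(VirtualPoolCapabilityValuesWrapper.BLOCK_CONSISTENCY_GROUP, consistencyGroupURI);
        }
        return capabilities;
    }
}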
use of com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper in project coprhd-controller by CoprHD.
the class RecoverPointScheduler method scheduleStorageForVpoolChangeProtected.
/**
* Scheduler for a Vpool change from a protected VPLEX Virtual volume to a different type
* of protection. Ex: RP+VPLEX upgrade to MetroPoint
*
* @param volume volume that is being changed to a protected vpool
* @param newVpool vpool requested to change to (must be protected)
* @param protectionVarrays Varrays to protect this volume to.
* @param vpoolChangeParam The change param for the vpool change operation
* @return list of Recommendation objects to satisfy the request
*/
public List<Recommendation> scheduleStorageForVpoolChangeProtected(Volume volume, VirtualPool newVpool, List<VirtualArray> protectionVirtualArraysForVirtualPool) {
_log.info(String.format("Schedule storage for vpool change to vpool [%s : %s] for volume [%s : %s]", newVpool.getLabel(), newVpool.getId().toString(), volume.getLabel(), volume.getId().toString()));
this.initResources();
VirtualPool currentVpool = dbClient.queryObject(VirtualPool.class, volume.getVirtualPool());
VirtualArray varray = dbClient.queryObject(VirtualArray.class, volume.getVirtualArray());
// Swap src and ha if the flag has been set on the vpool
SwapContainer container = this.swapSrcAndHAIfNeeded(varray, newVpool);
Project project = dbClient.queryObject(Project.class, volume.getProject());
VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
capabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, volume.getCapacity());
capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, 1);
capabilities.put(VirtualPoolCapabilityValuesWrapper.BLOCK_CONSISTENCY_GROUP, volume.getConsistencyGroup());
List<StoragePool> sourcePools = new ArrayList<StoragePool>();
List<StoragePool> haPools = new ArrayList<StoragePool>();
VirtualArray haVarray = vplexScheduler.getHaVirtualArray(container.getSrcVarray(), project, container.getSrcVpool());
VirtualPool haVpool = vplexScheduler.getHaVirtualPool(container.getSrcVarray(), project, container.getSrcVpool());
// Recommendations to return
List<Recommendation> recommendations = Lists.newArrayList();
// Upgrade RP+VPLEX to MetroPoint
if (VirtualPool.vPoolSpecifiesRPVPlex(currentVpool) && VirtualPool.vPoolSpecifiesMetroPoint(newVpool)) {
// We already have our VPLEX Metro source and targets provisioned.
// We're going to leverage this for placement.
_log.info("Scheduling storage for upgrade to MetroPoint, we need to place a HA/Stand-by/Secondary Journal");
// Get a handle on the existing source and ha volumes, we want to use the references to their
// existing storage pools to pass to the RP Scheduler.
Volume sourceBackingVolume = null;
Volume haBackingVolume = null;
if (null == volume.getAssociatedVolumes() || volume.getAssociatedVolumes().isEmpty()) {
_log.error("VPLEX volume {} has no backend volumes.", volume.forDisplay());
throw InternalServerErrorException.internalServerErrors.noAssociatedVolumesForVPLEXVolume(volume.forDisplay());
}
for (String associatedVolumeId : volume.getAssociatedVolumes()) {
URI associatedVolumeURI = URI.create(associatedVolumeId);
Volume backingVolume = dbClient.queryObject(Volume.class, associatedVolumeURI);
if (backingVolume.getVirtualArray().equals(volume.getVirtualArray())) {
sourceBackingVolume = backingVolume;
} else {
haBackingVolume = backingVolume;
}
}
// We already have source and HA pools (from the existing backing volumes), so add just those to the lists.
sourcePools.add(dbClient.queryObject(StoragePool.class, sourceBackingVolume.getPool()));
haPools.add(dbClient.queryObject(StoragePool.class, haBackingVolume.getPool()));
// Obtain a list of RP protection Virtual Arrays.
List<VirtualArray> tgtVarrays = RecoverPointScheduler.getProtectionVirtualArraysForVirtualPool(project, container.getSrcVpool(), dbClient, _permissionsHelper);
recommendations = createMetroPointRecommendations(container.getSrcVarray(), tgtVarrays, container.getSrcVpool(), haVarray, haVpool, project, capabilities, sourcePools, haPools, volume);
}
// There is only one entry of type RPProtectionRecommendation ever in the returned recommendation list.
_log.info(String.format("%s %n", ((RPProtectionRecommendation) recommendations.get(0)).toString(dbClient)));
return recommendations;
}
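For a vpool change on an existing volume the wrapper carries just three values: the volume's current capacity, a resource count of one, and the volume's consistency group. The SRDF scheduler in the next snippet builds the same set of keys. A hypothetical helper (not part of the schedulers) capturing that shared setup:
import com.emc.storageos.db.client.model.Volume;
import com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper;

// Hypothetical helper reproducing the three-key wrapper used when rescheduling a single
// existing volume during a vpool change.
final class VpoolChangeCapabilityExample {
    static VirtualPoolCapabilityValuesWrapper forExistingVolume(Volume volume) {
        VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
        capabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, volume.getCapacity());
        capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, 1);
        capabilities.put(VirtualPoolCapabilityValuesWrapper.BLOCK_CONSISTENCY_GROUP, volume.getConsistencyGroup());
        return capabilities;
    }
}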
use of com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper in project coprhd-controller by CoprHD.
the class SRDFScheduler method scheduleStorageForCosChangeUnprotected.
/**
* Scheduler for a vpool change from an unprotected volume to a protected volume.
*
* @param volume
* volume that is being changed to a protected vpool
* @param vpool
* vpool requested to change to (must be protected)
* @param targetVarrays
* Varrays to protect this volume to.
* @return list of Recommendation objects to satisfy the request
*/
public List<Recommendation> scheduleStorageForCosChangeUnprotected(final Volume volume, final VirtualPool vpool, final List<VirtualArray> targetVarrays, final VirtualPoolChangeParam param) {
_log.debug("Schedule storage for vpool change to vpool {} for volume {}.", String.valueOf(vpool.getId()), String.valueOf(volume.getId()));
List<StoragePool> matchedPoolsForVpool = VirtualPool.getValidStoragePools(vpool, _dbClient, true);
// Make sure our pool is in this list; this is a check to ensure the pool is in our existing
// varray and new vpool.
StoragePool sourcePool = null;
Iterator<StoragePool> iter = matchedPoolsForVpool.iterator();
while (iter.hasNext()) {
StoragePool pool = iter.next();
if (pool.getId().equals(volume.getPool())) {
sourcePool = pool;
break;
}
}
if (sourcePool == null) {
// We could not verify the source pool exists in the new vpool and existing varray, return appropriate error
_log.error("Volume's storage pool does not belong to vpool {} .", vpool.getLabel());
throw APIException.badRequests.noMatchingStoragePoolsForVpoolAndVarray(vpool.getLabel(), volume.getVirtualArray().toString());
}
VirtualPoolCapabilityValuesWrapper wrapper = new VirtualPoolCapabilityValuesWrapper();
wrapper.put(VirtualPoolCapabilityValuesWrapper.SIZE, volume.getCapacity());
wrapper.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, new Integer(1));
wrapper.put(VirtualPoolCapabilityValuesWrapper.BLOCK_CONSISTENCY_GROUP, volume.getConsistencyGroup());
// Schedule storage based on source volume storage pool
List<StoragePool> sourcePools = new ArrayList<StoragePool>();
sourcePools.add(sourcePool);
return scheduleStorageSourcePoolConstraint(_dbClient.queryObject(VirtualArray.class, volume.getVirtualArray()), _dbClient.queryObject(Project.class, volume.getProject().getURI()), vpool, wrapper, sourcePools, volume, volume.getConsistencyGroup());
}
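The source-pool check above walks the matched pools with an explicit iterator; the same lookup can be written more compactly. A sketch of an equivalent, assuming matchedPoolsForVpool and volume as in the method above (this helper is illustrative and not part of SRDFScheduler):
import java.util.List;
import com.emc.storageos.db.client.model.StoragePool;
import com.emc.storageos.db.client.model.Volume;

// Hypothetical equivalent of the iterator-based lookup: return the pool whose id matches the
// volume's current pool, or null when that pool is not valid for the new vpool.
final class SourcePoolLookupExample {
    static StoragePool findSourcePool(List<StoragePool> matchedPoolsForVpool, Volume volume) {
        return matchedPoolsForVpool.stream()
                .filter(pool -> pool.getId().equals(volume.getPool()))
                .findFirst()
                .orElse(null);
    }
}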
use of com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper in project coprhd-controller by CoprHD.
the class VPlexScheduler method scheduleStorageForDistributedVPLEXVolume.
/**
* Get recommendations for resource placement for distributed VPLEX volumes.
*
* @param srcVarray The virtual array in which the resources were requested.
* @param requestedVPlexSystems The URIs of the VPlex systems to which
* placement should be limited, or null when it doesn't matter.
* @param srcStorageSystem The URI of a specific backend storage system to
* which the source resource should be limited, or null when it
* doesn't matter.
* @param srcVpool The virtual pool requested for the source resources.
* @param haVarray The desired HA varray.
* @param haVpool The virtual pool for the HA resources.
* @param capabilities The virtual pool capabilities.
*
* @return A list of VPlexRecommendation instances specifying the
* recommended resource placement.
*/
private List<Recommendation> scheduleStorageForDistributedVPLEXVolume(VirtualArray srcVarray, Set<URI> requestedVPlexSystems, URI srcStorageSystem, VirtualPool srcVpool, VirtualArray haVarray, VirtualPool haVpool, VirtualPoolCapabilityValuesWrapper capabilities, Project project, VpoolUse srcVpoolUse, Map<VpoolUse, List<Recommendation>> currentRecommendations) {
_log.info("Executing VPLEX high availability placement strategy for Distributed VPLEX Volumes.");
// Initialize the list of recommendations.
List<Recommendation> recommendations = new ArrayList<Recommendation>();
// If a specific backend storage system was requested for the source, add it to the capabilities so it is enforced by the StorageSystemsMatcher.
if (srcStorageSystem != null) {
StorageSystem sourceStorageSystem = _dbClient.queryObject(StorageSystem.class, srcStorageSystem);
capabilities.put(VirtualPoolCapabilityValuesWrapper.SOURCE_STORAGE_SYSTEM, sourceStorageSystem);
}
// Call the lower level scheduler to get its recommendations.
Scheduler nextScheduler = _placementManager.getNextScheduler(SCHEDULER_NAME, srcVpool, srcVpoolUse);
_log.info(String.format("Calling next scheduler: %s", nextScheduler.getClass().getSimpleName()));
List<Recommendation> baseRecommendations = nextScheduler.getRecommendationsForVpool(srcVarray, project, srcVpool, srcVpoolUse, capabilities, currentRecommendations);
_log.info(String.format("Received %d recommendations from %s", baseRecommendations.size(), nextScheduler.getClass().getSimpleName()));
if (baseRecommendations.isEmpty()) {
throw BadRequestException.badRequests.noVplexLocalRecommendationFromSubScheduler(nextScheduler.getClass().getSimpleName(), srcVpool.getLabel(), srcVarray.getLabel());
}
_log.info(String.format("Received %d recommendations from %s", baseRecommendations.size(), nextScheduler.getClass().getSimpleName()));
List<StoragePool> allMatchingPoolsForSrcVarray = _placementManager.getStoragePoolsFromRecommendations(baseRecommendations);
_log.info("Found {} matching pools for source varray", allMatchingPoolsForSrcVarray.size());
URI cgURI = capabilities.getBlockConsistencyGroup();
BlockConsistencyGroup cg = (cgURI == null ? null : _dbClient.queryObject(BlockConsistencyGroup.class, cgURI));
// Sort the matching pools for the source varray by VPLEX system.
Map<String, List<StoragePool>> vplexPoolMapForSrcVarray = getVPlexConnectedMatchingPools(srcVarray, requestedVPlexSystems, capabilities, allMatchingPoolsForSrcVarray);
if (vplexPoolMapForSrcVarray.isEmpty()) {
_log.info("No matching pools on storage systems connected to a VPLEX");
// There are no matching pools on storage systems connected to a VPLEX, or a specific VPLEX system was requested and there are none for that VPLEX system.
return recommendations;
}
// Get all storage pools that match the passed HA VirtualPool params,
// and HA virtual array. In addition, the pool must have enough
// capacity to hold at least one resource of the requested size.
_log.info("Getting all matching pools for HA varray {}", haVarray.getId());
URI haStorageSystem = null;
VirtualPoolCapabilityValuesWrapper haCapabilities = new VirtualPoolCapabilityValuesWrapper(capabilities);
// Don't look for SRDF in the HA side.
haCapabilities.put(VirtualPoolCapabilityValuesWrapper.PERSONALITY, null);
// We don't require that the HA side have the same storage controller.
haCapabilities.put(VirtualPoolCapabilityValuesWrapper.BLOCK_CONSISTENCY_GROUP, null);
Map<String, Object> attributeMap = new HashMap<String, Object>();
List<StoragePool> allMatchingPoolsForHaVarray = getMatchingPools(haVarray, haStorageSystem, haVpool, haCapabilities, attributeMap);
if (allMatchingPoolsForHaVarray.isEmpty()) {
StringBuffer errorMessage = new StringBuffer();
if (attributeMap.get(AttributeMatcher.ERROR_MESSAGE) != null) {
errorMessage = (StringBuffer) attributeMap.get(AttributeMatcher.ERROR_MESSAGE);
}
throw BadRequestException.badRequests.noMatchingHighAvailabilityStoragePools(haVpool.getLabel(), haVarray.getLabel(), errorMessage.toString());
}
_log.info("Found {} matching pools for HA varray", allMatchingPoolsForHaVarray.size());
// Sort the matching pools for the HA varray by VPLEX system.
Map<String, List<StoragePool>> vplexPoolMapForHaVarray = sortPoolsByVPlexStorageSystem(allMatchingPoolsForHaVarray, haVarray.getId().toString());
if (vplexPoolMapForHaVarray.isEmpty()) {
_log.info("No matching pools on storage systems connected to a VPlex");
// There are no matching pools for the HA varray on storage systems connected to a VPlex storage system.
return recommendations;
}
// Get the list of potential VPlex storage systems for the source
// virtual array.
Set<String> vplexStorageSystemIds = vplexPoolMapForSrcVarray.keySet();
_log.info("{} VPlex storage systems have matching pools", vplexStorageSystemIds.size());
// Get the possible high availability varrays for each of these
// potential VPlex storage system.
Map<String, List<String>> vplexHaVarrayMap = ConnectivityUtil.getVPlexVarrays(_dbClient, vplexStorageSystemIds, srcVarray.getId());
// Loop over the potential VPlex storage systems, and attempt
// to place the resources.
Iterator<String> vplexSystemIdsIter = vplexStorageSystemIds.iterator();
while ((vplexSystemIdsIter.hasNext()) && (recommendations.isEmpty())) {
String vplexStorageSystemId = vplexSystemIdsIter.next();
_log.info("Attempting placement on VPlex {}", vplexStorageSystemId);
// Check if this VPLEX can satisfy the requested HA varray.
List<String> vplexHaVarrays = vplexHaVarrayMap.get(vplexStorageSystemId);
if (!vplexHaVarrays.contains(haVarray.getId().toString())) {
// It cannot, try the next VPLEX.
continue;
}
// Check if there are HA storage pools for this VPLEX.
if (!vplexPoolMapForHaVarray.containsKey(vplexStorageSystemId)) {
// There are no HA pools for this VPLEX, try the next.
continue;
}
// Check if the resource can be placed on the matching
// pools for this VPlex storage system in the source varray.
List<Recommendation> recommendationsForSrcVarray = new ArrayList<Recommendation>();
recommendationsForSrcVarray.addAll(createVPlexRecommendations(baseRecommendations, vplexStorageSystemId, srcVarray, srcVpool));
if (recommendationsForSrcVarray.isEmpty()) {
_log.info("Matching pools for source varray insufficient for placement");
// The source varray pools are not sufficient, so we need to try another VPlex.
continue;
}
// Get the storage systems specified by these recommendations.
// We don't want to use these same storage systems on the HA
// side when the same system is available to both, else you
// could create a distributed volume with both backend volumes
// on the same physical array.
Set<URI> recommendedSrcSystems = new HashSet<URI>();
for (Recommendation recommendation : recommendationsForSrcVarray) {
recommendedSrcSystems.add(recommendation.getSourceStorageSystem());
}
// Remove any storage pools on these systems from the list of
// matching pools for the HA varray for this VPLEX system.
boolean haPoolsLimitedBySrcSelections = false;
List<StoragePool> vplexPoolsForHaVarray = new ArrayList<StoragePool>(vplexPoolMapForHaVarray.get(vplexStorageSystemId));
Iterator<StoragePool> vplexPoolsForHaVarrayIter = vplexPoolsForHaVarray.iterator();
while (vplexPoolsForHaVarrayIter.hasNext()) {
StoragePool haPool = vplexPoolsForHaVarrayIter.next();
URI poolSystem = haPool.getStorageDevice();
if (recommendedSrcSystems.contains(poolSystem)) {
_log.info("Removing pool {} on system {} from consideration for HA placement", haPool.getId(), poolSystem);
vplexPoolsForHaVarrayIter.remove();
haPoolsLimitedBySrcSelections = true;
}
}
// Now check if the resource can be placed on the matching
// pools for this VPlex storage system in the HA varray.
List<Recommendation> recommendationsForHaVarray = getRecommendationsForPools(haVarray.getId().toString(), haVpool, vplexPoolsForHaVarray, capabilities);
if (recommendationsForHaVarray.isEmpty()) {
_log.info("Matching pools for HA varray insufficient for placement");
if (haPoolsLimitedBySrcSelections) {
// If we limited the pools on the HA side and failed to place,
// then let's reverse and use all pools on the HA side and limit
// the source side. This is certainly not perfect, but at least
// will try and use the pools on both sides before giving up.
recommendationsForHaVarray = getRecommendationsForPools(haVarray.getId().toString(), haVpool, vplexPoolMapForHaVarray.get(vplexStorageSystemId), capabilities);
if (recommendationsForHaVarray.isEmpty()) {
// Still can't place them on the HA side.
_log.info("Matching pools for HA varray still insufficient for placement");
continue;
} else {
// Remove the systems from the source side and see
// if the source side can still be placed when limited.
_log.info("Matching pools for HA varray now sufficient for placement");
}
} else {
// The HA varray pools are not sufficient, so we need to try another VPlex.
continue;
}
}
// We have recommendations for pools in both the source and HA varrays.
recommendations.addAll(recommendationsForSrcVarray);
recommendations.addAll(createVPlexRecommendations(vplexStorageSystemId, haVarray, haVpool, recommendationsForHaVarray));
_log.info("Done trying to place resources for VPlex.");
break;
}
_placementManager.logRecommendations("VPLEX Distributed", recommendations);
return recommendations;
}
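The most distinctive wrapper usage in this method is deriving the HA-side capabilities from the source-side capabilities via the copy constructor and then clearing the attributes that must not constrain the high-availability leg. A hypothetical helper (not part of VPlexScheduler) isolating that step:
import com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper;

// Hypothetical helper reproducing the HA-side capability derivation shown above.
final class HaCapabilityExample {
    static VirtualPoolCapabilityValuesWrapper forHaLeg(VirtualPoolCapabilityValuesWrapper srcCapabilities) {
        // Start from a copy of the source-side capabilities.
        VirtualPoolCapabilityValuesWrapper haCapabilities = new VirtualPoolCapabilityValuesWrapper(srcCapabilities);
        // Don't look for SRDF on the HA side.
        haCapabilities.put(VirtualPoolCapabilityValuesWrapper.PERSONALITY, null);
        // The HA side does not need to honor the source consistency group constraint.
        haCapabilities.put(VirtualPoolCapabilityValuesWrapper.BLOCK_CONSISTENCY_GROUP, null);
        return haCapabilities;
    }
}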