use of com.emc.storageos.db.client.model.VirtualArray in project coprhd-controller by CoprHD.
the class RPBlockServiceApiImpl method upgradeToMetroPointVolume.
/**
* Upgrade a local block volume to a protected RP volume
*
* @param volume the existing volume being protected.
* @param newVpool the requested virtual pool
* @param vpoolChangeParam the virtual pool change request parameters
* @param taskId the task identifier
* @throws InternalException
*/
private void upgradeToMetroPointVolume(Volume volume, VirtualPool newVpool, VirtualPoolChangeParam vpoolChangeParam, String taskId) throws InternalException {
_log.info(String.format("Upgrade [%s] to MetroPoint", volume.getLabel()));
Project project = _dbClient.queryObject(Project.class, volume.getProject());
// Now that we have a handle on the current vpool, let's set the new vpool on the volume.
// The volume will not be persisted just yet but we need to have the new vpool to
// properly make placement decisions and to add reference to the new vpool to the
// recommendation objects that will be created.
URI currentVpool = volume.getVirtualPool();
volume.setVirtualPool(newVpool.getId());
List<Recommendation> recommendations = getRecommendationsForVirtualPoolChangeRequest(volume, newVpool, vpoolChangeParam, null);
volume.setVirtualPool(currentVpool);
if (recommendations.isEmpty()) {
throw APIException.badRequests.noStorageFoundForVolume();
}
// Get the volume's varray
VirtualArray varray = _dbClient.queryObject(VirtualArray.class, volume.getVirtualArray());
// Generate a VolumeCreate object that contains the information that createVolumes likes to consume.
VolumeCreate param = new VolumeCreate(volume.getLabel(), String.valueOf(volume.getCapacity()), 1, newVpool.getId(), volume.getVirtualArray(), volume.getProject().getURI());
VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, 1);
capabilities.put(VirtualPoolCapabilityValuesWrapper.BLOCK_CONSISTENCY_GROUP, volume.getConsistencyGroup());
TaskList taskList = new TaskList();
createTaskForVolume(volume, ResourceOperationTypeEnum.CHANGE_BLOCK_VOLUME_VPOOL, taskList, taskId);
Map<VpoolUse, List<Recommendation>> recommendationMap = new HashMap<VpoolUse, List<Recommendation>>();
recommendationMap.put(VpoolUse.ROOT, recommendations);
createVolumes(param, project, varray, newVpool, recommendationMap, taskList, taskId, capabilities);
}
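The comments above capture the key move in this method: the new vpool is set on the volume only long enough to generate placement recommendations and is restored before anything is persisted. Below is a minimal sketch of that swap-and-restore pattern as it might be written inside RPBlockServiceApiImpl; the helper name is hypothetical, the Recommendation and VirtualPoolChangeParam import paths are assumptions, and the try/finally guard is an addition so the original vpool reference is restored even if placement fails.
import java.net.URI;
import java.util.List;
import com.emc.storageos.db.client.model.VirtualPool;
import com.emc.storageos.db.client.model.Volume;
import com.emc.storageos.model.block.VirtualPoolChangeParam; // assumed package
import com.emc.storageos.volumecontroller.Recommendation; // assumed package
// Hypothetical helper, for illustration only: swap in the new vpool just for placement,
// then restore the original reference before the volume is ever persisted.
private List<Recommendation> placeAgainstNewVpool(Volume volume, VirtualPool newVpool, VirtualPoolChangeParam vpoolChangeParam) {
    URI currentVpool = volume.getVirtualPool();
    volume.setVirtualPool(newVpool.getId());
    try {
        return getRecommendationsForVirtualPoolChangeRequest(volume, newVpool, vpoolChangeParam, null);
    } finally {
        // The volume was never persisted with the new vpool, so restoring the field is enough.
        volume.setVirtualPool(currentVpool);
    }
}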
use of com.emc.storageos.db.client.model.VirtualArray in project coprhd-controller by CoprHD.
the class RPBlockServiceApiImpl method prepareRecommendedVolumes.
/**
* Prepare Recommended Volumes for Protected scenarios only.
*
* This method is responsible for acting the same as the unprotected "prepareRecommendedVolumes" call,
* however it needs to create multiple volumes per single volume requests in order to generate protection.
*
* The most typical scenario is that, for any one volume requested in a CRR configuration, we create:
* 1. One Source Volume
* 2. One Source Journal Volume (minimum 10GB, otherwise 2.5X source size)
* 3. One Target Volume on protection varray
* 4. One Target Journal Volume on protection varray
*
* In a CLR configuration, there are additional volumes created for the Local Target and Local Target Journal.
*
* This method will assemble a ProtectionSet object in Cassandra that will describe the Protection that
* will be created on the Protection System.
*
* When other protection mechanisms come on board, the RP-ness of this method will need to be pulled out.
*
* @param param volume create request
* @param task task from request or generated
* @param taskList task list
* @param project project from request
* @param originalVarray varray from request
* @param originalVpool vpool from request
* @param numberOfVolumesInRequest volume count from the request
* @param recommendations list of resulting recommendations from placement
* @param volumeLabel base label for the volumes being prepared
* @param capabilities Capabilities object
* @param descriptors List of descriptors to be populated
* @param volumeURIs List to hold volumes that have been prepared
*/
private void prepareRecommendedVolumes(VolumeCreate param, String task, TaskList taskList, Project project, VirtualArray originalVarray, VirtualPool originalVpool, Integer numberOfVolumesInRequest, List<Recommendation> recommendations, String volumeLabel, VirtualPoolCapabilityValuesWrapper capabilities, List<VolumeDescriptor> descriptors, List<URI> volumeURIs) throws APIException {
boolean isChangeVpool = false;
boolean isChangeVpoolForProtectedVolume = false;
boolean isSrcAndHaSwapped = VirtualPool.isRPVPlexProtectHASide(originalVpool);
boolean metroPointEnabled = VirtualPool.vPoolSpecifiesMetroPoint(originalVpool);
// This copy of the capabilities object is meant to be used by all volume prepares that require changing data,
// which in our case are TARGET and JOURNALS. SOURCE will always use the main capabilities object.
VirtualPoolCapabilityValuesWrapper copyOfCapabilities = new VirtualPoolCapabilityValuesWrapper(capabilities);
// Set the volume name from the param
String volumeName = volumeLabel;
// Need to check if we should swap src and ha, call the block scheduler code to
// find out. Nothing will be changed for MetroPoint.
VirtualArray haVarray = null;
VirtualPool haVpool = null;
SwapContainer container = this.getBlockScheduler().new SwapContainer();
container.setSrcVarray(originalVarray);
container.setSrcVpool(originalVpool);
container.setHaVarray(haVarray);
container.setHaVpool(haVpool);
container = RecoverPointScheduler.initializeSwapContainer(container, _dbClient);
// Use the new references post swap
VirtualArray varray = container.getSrcVarray();
VirtualPool vpool = container.getSrcVpool();
// Save a reference to the CG, we'll need this later
BlockConsistencyGroup consistencyGroup = capabilities.getBlockConsistencyGroup() == null ? null : _dbClient.queryObject(BlockConsistencyGroup.class, capabilities.getBlockConsistencyGroup());
// Total volumes to be created
int totalVolumeCount = 0;
// Create an entire Protection object for each recommendation result.
Iterator<Recommendation> recommendationsIter = recommendations.iterator();
while (recommendationsIter.hasNext()) {
RPProtectionRecommendation rpProtectionRec = (RPProtectionRecommendation) recommendationsIter.next();
URI protectionSystemURI = rpProtectionRec.getProtectionDevice();
URI changeVpoolVolumeURI = rpProtectionRec.getVpoolChangeVolume();
Volume changeVpoolVolume = (changeVpoolVolumeURI == null ? null : _dbClient.queryObject(Volume.class, changeVpoolVolumeURI));
isChangeVpool = (changeVpoolVolumeURI != null);
isChangeVpoolForProtectedVolume = rpProtectionRec.isVpoolChangeProtectionAlreadyExists();
boolean addJournalForStandbySourceCopy = capabilities.getAddJournalCapacity() && (rpProtectionRec.getStandbyJournalRecommendation() != null);
String newVolumeLabel = volumeName;
// Find the Source RP Copy Name
String sourceCopyName = retrieveRpCopyName(originalVpool, varray, consistencyGroup, true);
String standbySourceCopyName = "";
if (addJournalForStandbySourceCopy) {
// Find the Source Standby RP Copy Name - for add journal operation
standbySourceCopyName = retrieveRpCopyName(originalVpool, varray, consistencyGroup, true);
}
if (metroPointEnabled) {
// Find the Source Standby RP Copy Name - for MetroPoint
haVarray = _dbClient.queryObject(VirtualArray.class, VPlexUtil.getHAVarray(originalVpool));
standbySourceCopyName = retrieveRpCopyName(originalVpool, haVarray, consistencyGroup, true);
}
StringBuffer volumeInfoBuffer = new StringBuffer();
volumeInfoBuffer.append(String.format(NEW_LINE));
// Prepare the Journals first
try {
prepareRpJournals(rpProtectionRec, project, consistencyGroup, vpool, originalVpool, param, numberOfVolumesInRequest, newVolumeLabel, isChangeVpoolForProtectedVolume, copyOfCapabilities, protectionSystemURI, taskList, task, descriptors, volumeURIs, volumeInfoBuffer, sourceCopyName, standbySourceCopyName);
} catch (Exception e) {
_log.error("Error trying to prepare RP Journal volumes", e);
throw APIException.badRequests.rpBlockApiImplPrepareVolumeException(newVolumeLabel);
}
// Prepare the source and targets
if (rpProtectionRec.getSourceRecommendations() != null) {
for (RPRecommendation sourceRec : rpProtectionRec.getSourceRecommendations()) {
// Get a reference to all existing VPLEX Source volumes (if any)
List<Volume> allSourceVolumesInCG = BlockConsistencyGroupUtils.getActiveVplexVolumesInCG(consistencyGroup, _dbClient, Volume.PersonalityTypes.SOURCE);
// Validate the MetroPoint type if this is the first MP volume of a new CG.
if (metroPointEnabled && allSourceVolumesInCG.isEmpty()) {
validateMetroPointType(sourceRec.getMetroPointType());
}
// Get the number of volumes needed to be created for this recommendation.
int volumeCountInRec = sourceRec.getResourceCount();
// All source volumes will share the same secondary journal.
if (isChangeVpoolForProtectedVolume) {
_log.info(String.format("Change Virtual Pool Protected: %d existing source volume(s) in CG [%s](%s) are affected.", allSourceVolumesInCG.size(), consistencyGroup.getLabel(), consistencyGroup.getId()));
// Force the count to the number of existing source volumes in the CG.
volumeCountInRec = allSourceVolumesInCG.size();
}
// Grab a handle of the haRec, it could be null which is Ok.
RPRecommendation haRec = sourceRec.getHaRecommendation();
for (int volumeCount = 0; volumeCount < volumeCountInRec; volumeCount++) {
// Let's not get into multiple of multiples, this class will handle multi volume creates.
// So force the incoming VolumeCreate param to be set to 1 always from here on.
sourceRec.setResourceCount(1);
if (haRec != null) {
haRec.setResourceCount(1);
}
newVolumeLabel = generateDefaultVolumeLabel(volumeName, totalVolumeCount, numberOfVolumesInRequest);
// Grab the existing volume and task object from the incoming task list
Volume preCreatedVolume = StorageScheduler.getPrecreatedVolume(_dbClient, taskList, newVolumeLabel);
// Assemble a Replication Set; A Collection of volumes. One production, and any number of
// targets.
String rsetName = "RSet-" + newVolumeLabel;
// Increment total volume count
totalVolumeCount++;
// This name has to remain unique, especially when the number of volumes requested to be created
// is more than 1.
param.setName(newVolumeLabel);
Volume sourceVolume = null;
// /////// SOURCE ///////////
if (!isChangeVpoolForProtectedVolume) {
if (isChangeVpool) {
_log.info(String.format("Change Vpool, use existing Source Volume [%s].", changeVpoolVolume.getLabel()));
} else {
_log.info("Create RP Source Volume...");
}
// Create the source
sourceVolume = createRecoverPointVolume(sourceRec, newVolumeLabel, project, capabilities, consistencyGroup, param, protectionSystemURI, Volume.PersonalityTypes.SOURCE, rsetName, preCreatedVolume, null, taskList, task, sourceCopyName, descriptors, changeVpoolVolume, isChangeVpool, isSrcAndHaSwapped, true);
} else {
if (metroPointEnabled) {
_log.info("Upgrade to MetroPoint operation...");
// Update each existing source volume in the CG to reference the newly created stand-by journal.
for (Volume sourceVol : allSourceVolumesInCG) {
_log.info(String.format("Update the source volume [%s](%s) with new standby journal.", sourceVol.getLabel(), sourceVol.getId()));
// All RP+VPLEX Metro volumes in this CG need to have their backing volume
// references updated with the internal site names for exports.
setInternalSitesForSourceBackingVolumes(sourceRec, haRec, sourceVol, true, false, originalVpool.getHaVarrayConnectedToRp(), sourceCopyName, standbySourceCopyName);
// We need to have all the existing RP+VPLEX Metro volumes from the CG
// added to the volumeURI list so we can properly export the standby
// leg to RP for each volume.
volumeURIs.add(sourceVol.getId());
}
} else {
// NOTE: Upgrade to MetroPoint is (currently) the only supported Change Virtual Pool Protected
// operation, so if we have a null standby journal we're in real trouble.
_log.error("Error trying to upgrade to MetroPoint. Standby journal is null.");
throw APIException.badRequests.rpBlockApiImplPrepareVolumeException(newVolumeLabel);
}
// Nothing further needs to be prepared for this operation past this point.
break;
}
volumeURIs.add(sourceVolume.getId());
// NOTE: This is only needed for MetroPoint and Distributed RP+VPLEX(HA as RP source),
// nothing will happen for regular RP volumes.
//
// Source volumes need to have their backing volumes set with the correct internal
// site name. The reason for this is so we know later on where to export the volumes to.
//
// This is very evident with MetroPoint as we need to export BOTH sides of the VPLEX Distributed
// Volume.
//
// This is less evident with Distributed RP+VPLEX that has "HA as RP source" set.
// In this case we need to set it on the HA volume as that is the side to export (not the source
// side).
// To do this we need to pass in a hint...
// We need the (unswapped) original vpool and we then check the getHaVarrayConnectedToRp() value
// which tells us
// which side(varray) to export.
// This value will only be used if isSrcAndHaSwapped == true.
setInternalSitesForSourceBackingVolumes(sourceRec, haRec, sourceVolume, metroPointEnabled, isSrcAndHaSwapped, originalVpool.getHaVarrayConnectedToRp(), sourceCopyName, standbySourceCopyName);
// /////// TARGET(S) ///////////
List<URI> protectionTargets = new ArrayList<URI>();
for (RPRecommendation targetRec : sourceRec.getTargetRecommendations()) {
// Keep track of the targets created
protectionTargets.add(targetRec.getVirtualArray());
// Grab the target's varray
VirtualArray targetVirtualArray = _dbClient.queryObject(VirtualArray.class, targetRec.getVirtualArray());
_log.info(String.format("Create Target (%s)...", targetVirtualArray.getLabel()));
// For a protected volume vpool change, check whether a target already exists so we do not need to provision this target.
if (isChangeVpoolForProtectedVolume) {
Volume alreadyProvisionedTarget = RPHelper.findAlreadyProvisionedTargetVolume(changeVpoolVolume, targetRec.getVirtualArray(), _dbClient);
if (alreadyProvisionedTarget != null) {
_log.info(String.format("Existing target volume [%s] found for varray [%s].", alreadyProvisionedTarget.getLabel(), targetVirtualArray.getLabel()));
// No need to go further, continue on to the next target varray
continue;
}
}
// Generate target volume name
String targetVolumeName = new StringBuilder(newVolumeLabel).append(VOLUME_TYPE_TARGET + targetVirtualArray.getLabel()).toString();
// Create the target
Volume targetVolume = createRecoverPointVolume(targetRec, targetVolumeName, project, copyOfCapabilities, consistencyGroup, param, protectionSystemURI, Volume.PersonalityTypes.TARGET, rsetName, null, sourceVolume, taskList, task, targetRec.getRpCopyName(), descriptors, null, false, false, false);
volumeInfoBuffer.append(logVolumeInfo(targetVolume));
volumeURIs.add(targetVolume.getId());
}
// /////// METROPOINT LOCAL TARGET(S) ///////////
if (metroPointEnabled && haRec.getTargetRecommendations() != null && !haRec.getTargetRecommendations().isEmpty()) {
// MetroPoint: we also need to create targets for the second (stand-by) leg.
for (RPRecommendation standbyTargetRec : haRec.getTargetRecommendations()) {
// Grab the MP target's varray
VirtualArray standyTargetVirtualArray = _dbClient.queryObject(VirtualArray.class, standbyTargetRec.getVirtualArray());
_log.info(String.format("Create Standby Target (%s)..", standyTargetVirtualArray.getLabel()));
// Skip any varray already covered by a target of the active source recommendation.
if (protectionTargets.contains(standbyTargetRec.getVirtualArray())) {
continue;
}
// Check whether a target volume already exists for this varray on the standby.
if (isChangeVpoolForProtectedVolume) {
Volume alreadyProvisionedTarget = RPHelper.findAlreadyProvisionedTargetVolume(changeVpoolVolume, standyTargetVirtualArray.getId(), _dbClient);
if (alreadyProvisionedTarget != null) {
_log.info(String.format("Existing target volume [%s] found for varray [%s].", alreadyProvisionedTarget.getLabel(), standyTargetVirtualArray.getLabel()));
// No need to go further, continue on to the next target varray
continue;
}
}
// Generate standby target label
String standbyTargetVolumeName = new StringBuilder(newVolumeLabel).append(VOLUME_TYPE_TARGET + standyTargetVirtualArray.getLabel()).toString();
// Create the standby target
Volume standbyTargetVolume = createRecoverPointVolume(standbyTargetRec, standbyTargetVolumeName, project, copyOfCapabilities, consistencyGroup, param, protectionSystemURI, Volume.PersonalityTypes.TARGET, rsetName, null, sourceVolume, taskList, task, standbyTargetRec.getRpCopyName(), descriptors, null, false, false, false);
volumeInfoBuffer.append(logVolumeInfo(standbyTargetVolume));
volumeURIs.add(standbyTargetVolume.getId());
}
}
// Hold off on logging the source volume until we're done creating the targets
volumeInfoBuffer.append(logVolumeInfo(sourceVolume));
}
}
volumeInfoBuffer.append(String.format(NEW_LINE));
_log.info(volumeInfoBuffer.toString());
}
}
}
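The javadoc above states the journal sizing rule for the CRR scenario: a minimum of 10GB, otherwise 2.5X the source size. The self-contained example below is only a worked illustration of that stated rule; the project's actual sizing lives in its placement and RP helper code and also honors journal policies.
// Worked example of the journal sizing rule described in the javadoc above.
public final class JournalSizeExample {
    private static final long GB = 1024L * 1024L * 1024L;
    // Journal capacity: at least 10 GB, otherwise 2.5x the source capacity.
    static long journalCapacityBytes(long sourceCapacityBytes) {
        long scaled = (long) (sourceCapacityBytes * 2.5);
        return Math.max(10L * GB, scaled);
    }
    public static void main(String[] args) {
        System.out.println(journalCapacityBytes(2L * GB) / GB);   // 2 GB source -> 10 GB journal (minimum applies)
        System.out.println(journalCapacityBytes(100L * GB) / GB); // 100 GB source -> 250 GB journal
    }
}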
use of com.emc.storageos.db.client.model.VirtualArray in project coprhd-controller by CoprHD.
the class FileService method createFSInternal.
/*
* All the common code for provisioning a file system, both in the normal public API use case and
* the internal object case.
* NOTE - the method below should always work with project being null.
*/
public TaskResourceRep createFSInternal(FileSystemParam param, Project project, TenantOrg tenant, DataObject.Flag[] flags) throws InternalException {
ArgValidator.checkFieldUriType(param.getVpool(), VirtualPool.class, "vpool");
ArgValidator.checkFieldUriType(param.getVarray(), VirtualArray.class, "varray");
Long fsSize = SizeUtil.translateSize(param.getSize());
// Convert to MB and check against the minimum size.
Long fsSizeMB = fsSize / (1024 * 1024);
// VNX file has min 2MB size, NetApp 20MB and Isilon 0
// VNX File 8.1.6 min 1GB size
ArgValidator.checkFieldMinimum(fsSizeMB, 1024, "MB", "size");
ArrayList<String> requestedTypes = new ArrayList<String>();
// check varray
VirtualArray neighborhood = _dbClient.queryObject(VirtualArray.class, param.getVarray());
ArgValidator.checkEntity(neighborhood, param.getVarray(), false);
_permissionsHelper.checkTenantHasAccessToVirtualArray(tenant.getId(), neighborhood);
String task = UUID.randomUUID().toString();
// check vpool reference
VirtualPool cos = _dbClient.queryObject(VirtualPool.class, param.getVpool());
_permissionsHelper.checkTenantHasAccessToVirtualPool(tenant.getId(), cos);
ArgValidator.checkEntity(cos, param.getVpool(), false);
if (!VirtualPool.Type.file.name().equals(cos.getType())) {
throw APIException.badRequests.virtualPoolNotForFileBlockStorage(VirtualPool.Type.file.name());
}
// prepare vpool capability values
VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
capabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, fsSize);
capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, new Integer(1));
if (VirtualPool.ProvisioningType.Thin.toString().equalsIgnoreCase(cos.getSupportedProvisioningType())) {
capabilities.put(VirtualPoolCapabilityValuesWrapper.THIN_PROVISIONING, Boolean.TRUE);
}
StringBuilder errorMsg = new StringBuilder();
if (cos.getFileReplicationSupported() && !FilePolicyServiceUtils.updatePolicyCapabilities(_dbClient, neighborhood, cos, project, null, capabilities, errorMsg)) {
_log.error("File system can not be created, ", errorMsg.toString());
throw APIException.badRequests.unableToProcessRequest(errorMsg.toString());
}
ArgValidator.checkFieldMaximum(param.getSoftLimit(), 100, "softLimit");
ArgValidator.checkFieldMaximum(param.getNotificationLimit(), 100, "notificationLimit");
if (param.getSoftLimit() != 0L) {
ArgValidator.checkFieldMinimum(param.getSoftGrace(), 1L, "softGrace");
}
if (param.getNotificationLimit() != 0) {
capabilities.put(VirtualPoolCapabilityValuesWrapper.SUPPORT_NOTIFICATION_LIMIT, Boolean.TRUE);
}
if (param.getSoftLimit() != 0) {
capabilities.put(VirtualPoolCapabilityValuesWrapper.SUPPORT_SOFT_LIMIT, Boolean.TRUE);
}
// verify quota
CapacityUtils.validateQuotasForProvisioning(_dbClient, cos, project, tenant, fsSize, "filesystem");
String suggestedNativeFsId = param.getFsId() == null ? "" : param.getFsId();
// Find the implementation that services this vpool and fileshare
FileServiceApi fileServiceApi = getFileServiceImpl(capabilities, _dbClient);
TaskList taskList = createFileTaskList(param, project, tenant, neighborhood, cos, flags, task);
// call thread that does the work.
CreateFileSystemSchedulingThread.executeApiTask(this, _asyncTaskService.getExecutorService(), _dbClient, neighborhood, project, cos, tenant, flags, capabilities, taskList, task, requestedTypes, param, fileServiceApi, suggestedNativeFsId);
auditOp(OperationTypeEnum.CREATE_FILE_SYSTEM, true, AuditLogManager.AUDITOP_BEGIN, param.getLabel(), param.getSize(), neighborhood.getId().toString(), project == null ? null : project.getId().toString());
// return the file share taskrep
return taskList.getTaskList().get(0);
}
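The size handling at the top of this method translates the requested size to bytes, converts it to MB, and rejects anything below 1024 MB (the 1 GB floor noted for VNX File 8.1.6). The snippet below is a self-contained sketch of just that arithmetic, assuming the size has already been translated to bytes as SizeUtil.translateSize does above.
// Minimal sketch of the minimum-size check used above (illustration only).
public final class FileSystemSizeCheckExample {
    static void checkMinimumSize(long fsSizeBytes) {
        long fsSizeMB = fsSizeBytes / (1024 * 1024);
        if (fsSizeMB < 1024) {
            throw new IllegalArgumentException("size must be at least 1024 MB, got " + fsSizeMB + " MB");
        }
    }
    public static void main(String[] args) {
        checkMinimumSize(5L * 1024 * 1024 * 1024); // 5 GB: passes
        checkMinimumSize(512L * 1024 * 1024);      // 512 MB: throws IllegalArgumentException
    }
}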
use of com.emc.storageos.db.client.model.VirtualArray in project coprhd-controller by CoprHD.
the class VPlexScheduler method getRecommendationsForResources.
public List<Recommendation> getRecommendationsForResources(VirtualArray vArray, Project project, VirtualPool vPool, VirtualPoolCapabilityValuesWrapper capabilities, Map<VpoolUse, List<Recommendation>> currentRecommendations) {
_log.info("Getting recommendations for VPlex volume placement");
// Validate that the vpool specifies VPLEX local or distributed high availability volumes.
if (!VirtualPool.HighAvailabilityType.vplex_distributed.name().equals(vPool.getHighAvailability()) && !VirtualPool.HighAvailabilityType.vplex_local.name().equals(vPool.getHighAvailability())) {
throw APIException.badRequests.invalidHighAvailability(vPool.getHighAvailability());
}
_log.info("VirtualPool has high availability {}", vPool.getHighAvailability());
Set<URI> vplexSystemsForPlacement = getVPlexSystemsForPlacement(vArray, vPool, capabilities);
// Determine if the volume creation request is for HA volumes.
boolean isHAVolumeRequest = VirtualPool.HighAvailabilityType.vplex_distributed.name().equals(vPool.getHighAvailability());
// Get and validate the high availability VirtualArray and VirtualPool.
// Note that the HA VirtualPool is optional. When not specified, the
// high availability VirtualPool defaults to the passed VirtualPool.
VirtualPool haVPool = vPool;
VirtualArray haVArray = null;
StringMap haVaVpMap = vPool.getHaVarrayVpoolMap();
if ((isHAVolumeRequest) && (haVaVpMap != null)) {
_log.info("Is HA request and with an HA VirtualArray VirtualPool map");
Iterator<String> vaIter = haVaVpMap.keySet().iterator();
while (vaIter.hasNext()) {
String haVaId = vaIter.next();
_log.info("HA VirtualArray is {}", haVaId);
if (!haVaId.equals(NullColumnValueGetter.getNullURI().toString())) {
_log.info("HA VirtualArray is not a null URI");
haVArray = getVirtualArrayForVolumeCreateRequest(project, URI.create(haVaId));
if (vArray.getId().toString().equals(haVArray.getId().toString())) {
throw APIException.badRequests.sameVirtualArrayAndHighAvailabilityArray();
}
}
// Now get the VirtualPool.
String haVpId = haVaVpMap.get(haVaId);
_log.info("HA VirtualPool is {}", haVpId);
if (!haVpId.equals(NullColumnValueGetter.getNullURI().toString())) {
_log.info("HA VirtualPool is not a null URI");
haVPool = BlockService.getVirtualPoolForRequest(project, URI.create(haVpId), _dbClient, _permissionsHelper);
}
}
}
// Get the volume placement based on passed parameters.
_log.info("VirtualPool: {}, HA VirtualPool: {}", vPool.getId().toString(), haVPool.getId().toString());
List<Recommendation> recommendations = scheduleStorage(vArray, vplexSystemsForPlacement, null, vPool, isHAVolumeRequest, haVArray, haVPool, capabilities, project, VpoolUse.ROOT, currentRecommendations);
return recommendations;
}
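The HA varray/vpool handling above reads the vpool's haVarrayVpoolMap, treating a null-URI sentinel on either side as "fall back to the value from the request". The sketch below is an illustrative, standalone rendering of that interpretation; the helper name and return shape are hypothetical, and the sentinel is passed in rather than hard-coded because the snippet obtains it from NullColumnValueGetter.getNullURI().
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
// Illustration only: resolve the (HA varray, HA vpool) pair from the map,
// where the null-URI sentinel means "reuse the varray/vpool from the request".
public final class HaVarrayVpoolMapExample {
    static URI[] resolveHaPair(Map<String, String> haVaVpMap, URI nullUriSentinel) {
        for (Map.Entry<String, String> entry : haVaVpMap.entrySet()) {
            URI haVarray = null;
            URI haVpool = null;
            if (!entry.getKey().equals(nullUriSentinel.toString())) {
                haVarray = URI.create(entry.getKey());
            }
            if (entry.getValue() != null && !entry.getValue().equals(nullUriSentinel.toString())) {
                haVpool = URI.create(entry.getValue());
            }
            return new URI[] { haVarray, haVpool }; // the map holds at most one entry
        }
        return new URI[] { null, null };
    }
    public static void main(String[] args) {
        URI nullUri = URI.create("null"); // stand-in sentinel for this example
        Map<String, String> map = new HashMap<String, String>();
        map.put("urn:storageos:VirtualArray:example-ha-varray:", nullUri.toString());
        URI[] pair = resolveHaPair(map, nullUri);
        // HA varray comes from the key; the null-URI value means "use the request's vpool".
        System.out.println(pair[0] + " / " + pair[1]);
    }
}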
use of com.emc.storageos.db.client.model.VirtualArray in project coprhd-controller by CoprHD.
the class VPlexScheduler method scheduleStorageForImport.
/**
* Schedule Storage for a VPLEX import operation where we are creating the
* HA volume.
*
* @param srcNH Source Neighborhood
* @param vplexs Set<URI> Set of Vplex System URIs that can be used
* @param requestedHaNH Optional requested HA Neighborhood. Can be null.
* @param cos CoS to be used for new volumes
* @param capabilities CoS capabilities to be used for new volume
* @return List<Recommendation>
*/
public List<Recommendation> scheduleStorageForImport(VirtualArray srcNH, Set<URI> vplexs, VirtualArray requestedHaNH, VirtualPool cos, VirtualPoolCapabilityValuesWrapper capabilities) {
Set<String> vplexSystemIds = new HashSet<String>();
for (URI vplexURI : vplexs) {
vplexSystemIds.add(vplexURI.toString());
}
List<Recommendation> recommendations = new ArrayList<Recommendation>();
// For an HA request, get the possible high availability neighborhoods
// for each potential VPlex storage system.
Map<String, List<String>> vplexHaNHMap = ConnectivityUtil.getVPlexVarrays(_dbClient, vplexSystemIds, srcNH.getId());
for (URI vplexSystemURI : vplexs) {
StorageSystem vplexSystem = _dbClient.queryObject(StorageSystem.class, vplexSystemURI);
// See if there is an HA varray
// for the VPlex that also contains pools suitable to place
// the resources.
List<String> vplexHaNHIds = vplexHaNHMap.get(vplexSystem.getId().toString());
if (vplexHaNHIds == null) {
continue;
}
_log.info("Found {} HA varrays", vplexHaNHIds.size());
for (String vplexHaNHId : vplexHaNHIds) {
_log.info("Check HA varray {}", vplexHaNHId);
// If a specific HA varray was requested and this varray is not it, then skip the varray.
if ((requestedHaNH != null) && (!vplexHaNHId.equals(requestedHaNH.getId().toString()))) {
_log.info("Not the requested HA varray, skip");
continue;
}
// Get all storage pools that match the passed CoS params,
// protocols, and this HA varray. In addition, the
// pool must have enough capacity to hold at least one
// resource of the requested size.
VirtualArray vplexHaNH = _dbClient.queryObject(VirtualArray.class, URI.create(vplexHaNHId));
Map<String, Object> attributeMap = new HashMap<String, Object>();
List<StoragePool> allMatchingPools = getMatchingPools(vplexHaNH, null, cos, capabilities, attributeMap);
_log.info("Found {} matching pools for HA varray", allMatchingPools.size());
// Now from the list of candidate pools, we only want pools
// on storage systems that are connected to the VPlex
// storage system. We find these storage pools and associate
// them to the VPlex storage systems to which their storage
// system is connected.
Map<String, List<StoragePool>> vplexPoolMapForHaNH = sortPoolsByVPlexStorageSystem(allMatchingPools, vplexHaNHId);
// If the HA varray has candidate pools for this
// VPlex, see if the candidate pools in this HA
// varray are sufficient to place the resources.
List<Recommendation> recommendationsForHaNH = new ArrayList<Recommendation>();
if (vplexPoolMapForHaNH.containsKey(vplexSystem.getId().toString())) {
_log.info("Found matching pools in HA NH for VPlex {}", vplexSystem.getId());
recommendationsForHaNH = _blockScheduler.getRecommendationsForPools(vplexHaNH.getId().toString(), vplexPoolMapForHaNH.get(vplexSystem.getId().toString()), capabilities);
} else {
_log.info("No matching pools in HA NH for VPlex {}", vplexSystem.getId());
}
recommendations.addAll(createVPlexRecommendations(vplexSystem.getId().toString(), vplexHaNH, cos, recommendationsForHaNH));
}
}
return recommendations;
}
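scheduleStorageForImport walks a map of VPLEX system to candidate HA varrays (built by ConnectivityUtil.getVPlexVarrays) and skips any candidate that does not match an explicitly requested HA varray. The standalone sketch below shows only that selection step with hypothetical IDs; pool matching and recommendation creation are omitted.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
// Illustration only: pick the HA varray candidates for one VPLEX system,
// honoring an optional explicitly requested HA varray (null means "any").
public final class HaVarrayCandidateExample {
    static List<String> candidateHaVarrays(Map<String, List<String>> vplexHaVarrayMap, String vplexSystemId, String requestedHaVarrayId) {
        List<String> candidates = vplexHaVarrayMap.get(vplexSystemId);
        if (candidates == null) {
            return Collections.emptyList(); // this VPLEX has no usable HA varrays
        }
        List<String> result = new ArrayList<String>();
        for (String haVarrayId : candidates) {
            if (requestedHaVarrayId != null && !haVarrayId.equals(requestedHaVarrayId)) {
                continue; // not the requested HA varray, skip it
            }
            result.add(haVarrayId);
        }
        return result;
    }
    public static void main(String[] args) {
        Map<String, List<String>> map = new HashMap<String, List<String>>();
        map.put("vplex-1", Arrays.asList("varray-A", "varray-B"));
        System.out.println(candidateHaVarrays(map, "vplex-1", "varray-B")); // [varray-B]
        System.out.println(candidateHaVarrays(map, "vplex-1", null));       // [varray-A, varray-B]
    }
}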