use of com.emc.storageos.volumecontroller.Recommendation in project coprhd-controller by CoprHD.
Class VPlexBlockServiceApiImpl, method createBackendVolumeMigrationDescriptors.
/**
* Does the work necessary to prepare the passed backend volume for the
* passed virtual volume to be migrated to a new volume with a new VirtualPool.
*
* @param vplexSystem A reference to the Vplex storage system
* @param virtualVolume A reference to the virtual volume.
* @param sourceVolume A reference to the backend volume to be migrated.
* @param varray A reference to the varray for the backend volume.
* @param vpool A reference to the VirtualPool for the new volume.
* @param capacity The capacity for the migration target.
* @param taskId The task identifier.
* @param recommendations The placement recommendations to use; when null or empty,
* placement is scheduled within this method.
* @param isHA true when the descriptors are being created for the HA side backend volume.
* @param capabilities The virtual pool capabilities; when null, a default wrapper is created.
*
* @return A list of volume descriptors for the new backend target volume and its migration.
*/
private List<VolumeDescriptor> createBackendVolumeMigrationDescriptors(StorageSystem vplexSystem, Volume virtualVolume, Volume sourceVolume, VirtualArray varray, VirtualPool vpool, Long capacity, String taskId, List<Recommendation> recommendations, boolean isHA, VirtualPoolCapabilityValuesWrapper capabilities) {
// If we know the backend source volume, the new backend volume
// will have the same label and project. Otherwise, the volume
// must have been ingested and we know nothing about the backend volume.
// Therefore, we create the label based on the name of the VPLEX
// volume and determine the project in a manner similar to a
// volume creation.
URI sourceVolumeURI = null;
Project targetProject = null;
String targetLabel = null;
if (sourceVolume != null) {
// Since we know the source volume, this is not an ingested
// VPLEX volume that is being migrated. Ideally we would just
// give the new backend volume the same name as the current
// (i.e., source) volume. However, this is a problem if the migration
// is on the same array. We can't have two volumes with the
// same name. Eventually the source will go away, but not until
// after the migration is completed. The backend volume names
// are basically irrelevant, but we still want them tied to
// the VPLEX volume name.
//
// When initially created, the names are <vvolname>-0 or
// <vvolname>-1, depending on whether it is the source side
// backend volume or the HA side backend volume. The volume may
// also have an additional suffix of "-<1...N>" if the
// VPLEX volume was created as part of a multi-volume creation
// request, where N was the number of volumes requested. When
// a volume is first migrated, we will append an "m" to the current
// source volume name to ensure name uniqueness. If the volume
// happens to be migrated again, we'll remove the extra character.
// We'll go back and forth in this manner for each migration of that
// backend volume.
sourceVolumeURI = sourceVolume.getId();
targetProject = _dbClient.queryObject(Project.class, sourceVolume.getProject().getURI());
targetLabel = sourceVolume.getLabel();
if (!targetLabel.endsWith(MIGRATION_LABEL_SUFFIX)) {
targetLabel += MIGRATION_LABEL_SUFFIX;
} else {
targetLabel = targetLabel.substring(0, targetLabel.length() - 1);
}
} else {
targetProject = getVplexProject(vplexSystem, _dbClient, _tenantsService);
targetLabel = virtualVolume.getLabel();
if (virtualVolume.getVirtualArray().equals(varray.getId())) {
targetLabel += SRC_BACKEND_VOL_LABEL_SUFFIX;
} else {
targetLabel += HA_BACKEND_VOL_LABEL_SUFFIX;
}
}
// Get the recommendation for this volume placement.
Set<URI> requestedVPlexSystems = new HashSet<URI>();
requestedVPlexSystems.add(vplexSystem.getId());
URI cgURI = null;
// Check to see if a VirtualPoolCapabilityValuesWrapper has been passed in; if not, create a new one.
if (capabilities != null) {
// The consistency group or null when not specified.
final BlockConsistencyGroup consistencyGroup = capabilities.getBlockConsistencyGroup() == null ? null : _dbClient.queryObject(BlockConsistencyGroup.class, capabilities.getBlockConsistencyGroup());
// Use the consistency group for the backend volume only when the CG has not
// yet been created or it has the LOCAL type; in any other case, we don't want
// a volume creation to result in backend CGs.
if ((consistencyGroup != null) && ((!consistencyGroup.created()) || (consistencyGroup.getTypes().contains(Types.LOCAL.toString())))) {
cgURI = consistencyGroup.getId();
}
} else {
capabilities = new VirtualPoolCapabilityValuesWrapper();
capabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, capacity);
capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, Integer.valueOf(1));
}
boolean premadeRecs = false;
if (recommendations == null || recommendations.isEmpty()) {
recommendations = getBlockScheduler().scheduleStorage(varray, requestedVPlexSystems, null, vpool, false, null, null, capabilities, targetProject, VpoolUse.ROOT, new HashMap<VpoolUse, List<Recommendation>>());
if (recommendations.isEmpty()) {
throw APIException.badRequests.noStorageFoundForVolumeMigration(vpool.getLabel(), varray.getLabel(), sourceVolumeURI);
}
s_logger.info("Got recommendation");
} else {
premadeRecs = true;
}
// If premade recommendations were passed in and we are creating descriptors for the HA side,
// then the HA recommendation will be at index 1 instead of index 0. The default case is index 0.
int recIndex = (premadeRecs && isHA) ? 1 : 0;
// Create a volume for the new backend volume to which
// data will be migrated.
URI targetStorageSystem = recommendations.get(recIndex).getSourceStorageSystem();
URI targetStoragePool = recommendations.get(recIndex).getSourceStoragePool();
Volume targetVolume = prepareVolumeForRequest(capacity, targetProject, varray, vpool, targetStorageSystem, targetStoragePool, targetLabel, ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME, taskId, _dbClient);
// If the cgURI is null, try and get it from the source volume.
if (cgURI == null) {
if ((sourceVolume != null) && (!NullColumnValueGetter.isNullURI(sourceVolume.getConsistencyGroup()))) {
cgURI = sourceVolume.getConsistencyGroup();
targetVolume.setConsistencyGroup(cgURI);
}
}
if ((sourceVolume != null) && NullColumnValueGetter.isNotNullValue(sourceVolume.getReplicationGroupInstance())) {
targetVolume.setReplicationGroupInstance(sourceVolume.getReplicationGroupInstance());
}
targetVolume.addInternalFlags(Flag.INTERNAL_OBJECT);
_dbClient.updateObject(targetVolume);
s_logger.info("Prepared volume {}", targetVolume.getId());
// Add the volume to the passed new volumes list and pool
// volume map.
URI targetVolumeURI = targetVolume.getId();
List<VolumeDescriptor> descriptors = new ArrayList<VolumeDescriptor>();
descriptors.add(new VolumeDescriptor(VolumeDescriptor.Type.BLOCK_DATA, targetStorageSystem, targetVolumeURI, targetStoragePool, cgURI, capabilities, capacity));
// Create a migration to represent the migration of data
// from the backend volume to the new backend volume for the
// passed virtual volume and add the migration to the passed
// migrations list.
Migration migration = prepareMigration(virtualVolume.getId(), sourceVolumeURI, targetVolumeURI, taskId);
descriptors.add(new VolumeDescriptor(VolumeDescriptor.Type.VPLEX_MIGRATE_VOLUME, targetStorageSystem, targetVolumeURI, targetStoragePool, cgURI, migration.getId(), capabilities));
printMigrationInfo(migration, sourceVolume, targetVolume);
return descriptors;
}
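To make the naming convention described in the comments above concrete, here is a small illustrative sketch; the helper method and the assumption that MIGRATION_LABEL_SUFFIX is a single character (for example "m") are not part of the class, only an illustration of the toggling behavior.
// Illustrative only: how a backend volume label alternates across repeated migrations,
// assuming MIGRATION_LABEL_SUFFIX is a single character such as "m".
private static String nextMigrationLabel(String currentLabel, String suffix) {
    if (!currentLabel.endsWith(suffix)) {
        // First migration of this backend volume: append the suffix for name uniqueness.
        return currentLabel + suffix;
    }
    // Migrated again: drop the suffix so the name toggles back to its previous form.
    return currentLabel.substring(0, currentLabel.length() - suffix.length());
}
// nextMigrationLabel("vvol-0", "m")  returns "vvol-0m"
// nextMigrationLabel("vvol-0m", "m") returns "vvol-0"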
use of com.emc.storageos.volumecontroller.Recommendation in project coprhd-controller by CoprHD.
Class VPlexBlockServiceApiImpl, method prepareBackendVolumeForMigration.
/**
* Deprecated. Use createBackendVolumeMigrationDescriptors instead, which produces the
* volume descriptors needed by BlockOrchestration.
*
* Does the work necessary to prepare the passed backend volume for the
* passed virtual volume to be migrated to a new volume with a new VirtualPool.
*
* @param vplexSystem A reference to the Vplex storage system
* @param virtualVolume A reference to the virtual volume.
* @param sourceVolume A reference to the backend volume to be migrated.
* @param varray A reference to the varray for the backend volume.
* @param vpool A reference to the VirtualPool for the new volume.
* @param capacity The capacity for the migration target.
* @param taskId The task identifier.
* @param newVolumes An OUT parameter to which the new volume is added.
* @param migrationMap An OUT parameter to which the new migration is added.
* @param poolVolumeMap An OUT parameter associating the new volume with the
* storage pool in which it will be created.
*/
@Deprecated
private void prepareBackendVolumeForMigration(StorageSystem vplexSystem, Volume virtualVolume, Volume sourceVolume, VirtualArray varray, VirtualPool vpool, Long capacity, String taskId, List<URI> newVolumes, Map<URI, URI> migrationMap, Map<URI, URI> poolVolumeMap) {
URI sourceVolumeURI = null;
Project targetProject = null;
String targetLabel = null;
if (sourceVolume != null) {
// Since we know the source volume, this is not an ingested
// VPLEX volume that is being migrated. Ideally we would just
// give the new backend volume the same name as the current
// (i.e., source) volume. However, this is a problem if the migration
// is on the same array. We can't have two volumes with the
// same name. Eventually the source will go away, but not until
// after the migration is completed. The backend volume names
// are basically irrelevant, but we still want them tied to
// the VPLEX volume name.
//
// When initially created, the names are <vvolname>-0 or
// <vvolname>-1, depending on whether it is the source side
// backend volume or the HA side backend volume. The volume may
// also have an additional suffix of "-<1...N>" if the
// VPLEX volume was created as part of a multi-volume creation
// request, where N was the number of volumes requested. When
// a volume is first migrated, we will append an "m" to the current
// source volume name to ensure name uniqueness. If the volume
// happens to be migrated again, we'll remove the extra character.
// We'll go back and forth in this manner for each migration of that
// backend volume.
sourceVolumeURI = sourceVolume.getId();
targetProject = _dbClient.queryObject(Project.class, sourceVolume.getProject().getURI());
targetLabel = sourceVolume.getLabel();
if (!targetLabel.endsWith(MIGRATION_LABEL_SUFFIX)) {
targetLabel += MIGRATION_LABEL_SUFFIX;
} else {
targetLabel = targetLabel.substring(0, targetLabel.length() - 1);
}
} else {
// The VPLEX volume must have been ingested and now the backend
// volume(s) are being migrated. We have no idea what the
// source volume names are. Therefore, we just give the new
// backend volumes the initial suffixes. It is highly unlikely
// that the ingested volumes follow the same naming convention
// for their backend volumes.
targetProject = getVplexProject(vplexSystem, _dbClient, _tenantsService);
targetLabel = virtualVolume.getLabel();
if (virtualVolume.getVirtualArray().equals(varray.getId())) {
targetLabel += SRC_BACKEND_VOL_LABEL_SUFFIX;
} else {
targetLabel += HA_BACKEND_VOL_LABEL_SUFFIX;
}
}
// Get the recommendation for this volume placement.
URI cgURI = null;
// Guard against a null source volume (the ingested case) before checking its CG.
if ((sourceVolume != null) && (!NullColumnValueGetter.isNullURI(sourceVolume.getConsistencyGroup()))) {
cgURI = sourceVolume.getConsistencyGroup();
}
Set<URI> requestedVPlexSystems = new HashSet<URI>();
requestedVPlexSystems.add(vplexSystem.getId());
VirtualPoolCapabilityValuesWrapper cosWrapper = new VirtualPoolCapabilityValuesWrapper();
cosWrapper.put(VirtualPoolCapabilityValuesWrapper.SIZE, capacity);
cosWrapper.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, Integer.valueOf(1));
if (cgURI != null) {
cosWrapper.put(VirtualPoolCapabilityValuesWrapper.BLOCK_CONSISTENCY_GROUP, cgURI);
}
List<Recommendation> recommendations = getBlockScheduler().scheduleStorage(varray, requestedVPlexSystems, null, vpool, false, null, null, cosWrapper, targetProject, VpoolUse.ROOT, new HashMap<VpoolUse, List<Recommendation>>());
if (recommendations.isEmpty()) {
throw APIException.badRequests.noStorageFoundForVolumeMigration(vpool.getLabel(), varray.getLabel(), sourceVolumeURI);
}
s_logger.info("Got recommendation");
// Create a volume for the new backend volume to which
// data will be migrated.
URI targetStorageSystem = recommendations.get(0).getSourceStorageSystem();
URI targetStoragePool = recommendations.get(0).getSourceStoragePool();
Volume targetVolume = prepareVolumeForRequest(capacity, targetProject, varray, vpool, targetStorageSystem, targetStoragePool, targetLabel, ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME, taskId, _dbClient);
if (cgURI != null) {
targetVolume.setConsistencyGroup(cgURI);
}
targetVolume.addInternalFlags(Flag.INTERNAL_OBJECT);
_dbClient.updateObject(targetVolume);
s_logger.info("Prepared volume {}", targetVolume.getId());
// Add the volume to the passed new volumes list and pool
// volume map.
URI targetVolumeURI = targetVolume.getId();
newVolumes.add(targetVolumeURI);
poolVolumeMap.put(targetStoragePool, targetVolumeURI);
// Create a migration to represent the migration of data
// from the backend volume to the new backend volume for the
// passed virtual volume and add the migration to the passed
// migrations list.
Migration migration = prepareMigration(virtualVolume.getId(), sourceVolumeURI, targetVolumeURI, taskId);
migrationMap.put(targetVolumeURI, migration.getId());
s_logger.info("Prepared migration {}.", migration.getId());
}
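For context, a caller of this deprecated variant supplies the OUT collections and reads them back after the call. A hypothetical sketch (the variable names and the single call shown are assumptions, not taken from the class):
// Hypothetical caller sketch: the OUT collections accumulate one entry per prepared backend volume.
List<URI> newVolumes = new ArrayList<URI>();
Map<URI, URI> migrationMap = new HashMap<URI, URI>();   // target volume URI -> migration URI
Map<URI, URI> poolVolumeMap = new HashMap<URI, URI>();  // storage pool URI -> target volume URI
prepareBackendVolumeForMigration(vplexSystem, virtualVolume, sourceVolume, varray, vpool, capacity, taskId, newVolumes, migrationMap, poolVolumeMap);
// After the call, newVolumes holds the prepared target volume, and the two maps carry the
// associations later used to drive the migration workflow.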
use of com.emc.storageos.volumecontroller.Recommendation in project coprhd-controller by CoprHD.
Class VPlexBlockServiceApiImpl, method createVolumes.
/**
* {@inheritDoc}
*
* @throws InternalException
*/
@Override
public TaskList createVolumes(VolumeCreate param, Project project, VirtualArray vArray, VirtualPool vPool, Map<VpoolUse, List<Recommendation>> recommendationMap, TaskList taskList, String task, VirtualPoolCapabilityValuesWrapper vPoolCapabilities) throws InternalException {
List<Recommendation> volRecommendations = recommendationMap.get(VpoolUse.ROOT);
List<Recommendation> srdfCopyRecommendations = recommendationMap.get(VpoolUse.SRDF_COPY);
if (taskList == null) {
taskList = new TaskList();
}
List<URI> allVolumes = new ArrayList<URI>();
List<VolumeDescriptor> descriptors = createVPlexVolumeDescriptors(param, project, vArray, vPool, volRecommendations, task, vPoolCapabilities, vPoolCapabilities.getBlockConsistencyGroup(), taskList, allVolumes, true);
for (VolumeDescriptor desc : descriptors) {
s_logger.info("Vplex Root Descriptors: " + desc.toString());
}
if (srdfCopyRecommendations != null) {
// The SRDF copy recommendation may or may not be for a VPLEX volume.
for (Recommendation srdfCopyRecommendation : srdfCopyRecommendations) {
vArray = _dbClient.queryObject(VirtualArray.class, srdfCopyRecommendation.getVirtualArray());
vPool = srdfCopyRecommendation.getVirtualPool();
List<VolumeDescriptor> srdfCopyDescriptors = new ArrayList<VolumeDescriptor>();
List<Recommendation> copyRecommendations = new ArrayList<Recommendation>();
copyRecommendations.add(srdfCopyRecommendation);
if (srdfCopyRecommendation instanceof VPlexRecommendation) {
String name = param.getName();
// Do not pass in the consistency group for vplex volumes fronting targets
// as we will eventually put them in the target CG.
srdfCopyDescriptors = createVPlexVolumeDescriptors(param, project, vArray, vPool, copyRecommendations, task, vPoolCapabilities, null, taskList, allVolumes, true);
param.setName(name);
} else {
srdfCopyDescriptors = super.createVolumesAndDescriptors(srdfCopyDescriptors, param.getName() + "_srdf_copy", vPoolCapabilities.getSize(), project, vArray, vPool, copyRecommendations, taskList, task, vPoolCapabilities);
}
for (VolumeDescriptor desc : srdfCopyDescriptors) {
s_logger.info("SRDF Copy: " + desc.toString());
}
descriptors.addAll(srdfCopyDescriptors);
}
}
// Log volume descriptor information
logVolumeDescriptorPrecreateInfo(descriptors, task);
// Now we get the Orchestration controller and use it to create the volumes of all types.
try {
BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
controller.createVolumes(descriptors, task);
} catch (InternalException e) {
if (s_logger.isErrorEnabled()) {
s_logger.error("Controller error", e);
}
String errMsg = String.format("Controller error: %s", e.getMessage());
Operation statusUpdate = new Operation(Operation.Status.error.name(), errMsg);
for (URI volumeURI : allVolumes) {
_dbClient.updateTaskOpStatus(Volume.class, volumeURI, task, statusUpdate);
}
for (TaskResourceRep volumeTask : taskList.getTaskList()) {
volumeTask.setState(Operation.Status.error.name());
volumeTask.setMessage(errMsg);
}
throw e;
}
return taskList;
}
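The recommendation map handed to createVolumes is keyed by VpoolUse, with the primary placement under VpoolUse.ROOT and any SRDF copy placement under VpoolUse.SRDF_COPY. A hedged sketch of how a caller might assemble it (the variable names and the surrounding call are assumptions, not taken from the class):
// Hypothetical sketch: group scheduler output by its intended use before calling createVolumes.
Map<VpoolUse, List<Recommendation>> recommendationMap = new HashMap<VpoolUse, List<Recommendation>>();
recommendationMap.put(VpoolUse.ROOT, rootRecommendations);          // placement for the VPLEX virtual volumes
recommendationMap.put(VpoolUse.SRDF_COPY, srdfCopyRecommendations); // only when SRDF copies are requested
TaskList taskList = vplexBlockServiceApi.createVolumes(param, project, vArray, vPool, recommendationMap, null, taskId, vPoolCapabilities);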
use of com.emc.storageos.volumecontroller.Recommendation in project coprhd-controller by CoprHD.
Class VPlexBlockServiceApiImpl, method sortRecommendationsByVarray.
/**
* Sort the recommendations by VirtualArray. There can be up to two
* VirtualArrays, the requested VirtualArray and the HA VirtualArray
* either passed or determined by the placement when HA virtual volumes
* are being created. We also set the VPlex storage system, which
* should be the same for all recommendations.
*
* @param recommendations -- list of Recommendations
* @param vplexSystemURIOut -- Output parameter that receives the VPlex system URI
* @return A map of the VPlex recommendations keyed by virtual array id
*/
private Map<String, List<VPlexRecommendation>> sortRecommendationsByVarray(List<Recommendation> recommendations, URI[] vplexSystemURIOut) {
URI vplexStorageSystemURI = null;
Map<String, List<VPlexRecommendation>> varrayRecommendationsMap = new HashMap<String, List<VPlexRecommendation>>();
for (Recommendation recommendation : recommendations) {
VPlexRecommendation vplexRecommendation = (VPlexRecommendation) recommendation;
String varrayId = vplexRecommendation.getVirtualArray().toString();
if (vplexStorageSystemURI == null) {
vplexStorageSystemURI = vplexRecommendation.getVPlexStorageSystem();
vplexSystemURIOut[0] = vplexStorageSystemURI;
}
if (!varrayRecommendationsMap.containsKey(varrayId)) {
List<VPlexRecommendation> varrayRecommendations = new ArrayList<VPlexRecommendation>();
varrayRecommendations.add(vplexRecommendation);
varrayRecommendationsMap.put(varrayId, varrayRecommendations);
} else {
List<VPlexRecommendation> varrayRecommendations = varrayRecommendationsMap.get(varrayId);
varrayRecommendations.add(vplexRecommendation);
}
}
return varrayRecommendationsMap;
}
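As a style note only, the grouping loop above could also be written with Map.computeIfAbsent (Java 8+); the sketch below is behavior-preserving and not part of the class:
// Equivalent grouping with computeIfAbsent; produces the same map as the if/else branch above.
for (Recommendation recommendation : recommendations) {
    VPlexRecommendation vplexRecommendation = (VPlexRecommendation) recommendation;
    String varrayId = vplexRecommendation.getVirtualArray().toString();
    if (vplexStorageSystemURI == null) {
        vplexStorageSystemURI = vplexRecommendation.getVPlexStorageSystem();
        vplexSystemURIOut[0] = vplexStorageSystemURI;
    }
    varrayRecommendationsMap.computeIfAbsent(varrayId, k -> new ArrayList<VPlexRecommendation>()).add(vplexRecommendation);
}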
use of com.emc.storageos.volumecontroller.Recommendation in project coprhd-controller by CoprHD.
Class RPBlockServiceApiImpl, method getRecommendationsForVirtualPoolChangeRequest.
/**
* Recommendations for change vpool
*
* @param changeVpoolVolume
* Volume to be moved
* @param newVpool
* The new vpool
* @param vpoolChangeParam
* The change vpool param
* @param capabilities
* The virtual pool capabilities
* @return List of recommendations for change vpool
*/
private List<Recommendation> getRecommendationsForVirtualPoolChangeRequest(Volume changeVpoolVolume, VirtualPool newVpool, VirtualPoolChangeParam vpoolChangeParam, VirtualPoolCapabilityValuesWrapper capabilities) {
Project project = _dbClient.queryObject(Project.class, changeVpoolVolume.getProject());
List<Recommendation> recommendations = null;
if (changeVpoolVolume.checkForRp()) {
recommendations = getBlockScheduler().scheduleStorageForVpoolChangeProtected(changeVpoolVolume, newVpool, RecoverPointScheduler.getProtectionVirtualArraysForVirtualPool(project, newVpool, _dbClient, super.getPermissionsHelper()));
} else {
VirtualArray varray = _dbClient.queryObject(VirtualArray.class, changeVpoolVolume.getVirtualArray());
recommendations = getBlockScheduler().getRecommendationsForResources(varray, project, newVpool, capabilities);
}
return recommendations;
}
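A hypothetical call site for this helper, only to illustrate its use during a change vpool request; the variable names and the logger field are assumptions, not taken from the class:
// Hypothetical sketch: RP-protected volumes are placed by the RecoverPoint scheduler,
// all other volumes fall back to ordinary block placement for the new vpool.
List<Recommendation> recommendations = getRecommendationsForVirtualPoolChangeRequest(changeVpoolVolume, newVpool, vpoolChangeParam, capabilities);
_log.info("Got {} recommendation(s) for the change vpool request on volume {}", recommendations.size(), changeVpoolVolume.getId());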