Use of com.emc.storageos.db.client.model.Migration in project coprhd-controller by CoprHD.
The class BlockOrchestrationDeviceController, method postRPChangeVpoolSteps.
/**
* Needed to perform post change vpool operations on RP volumes.
*
* @param workflow
* The current workflow
* @param waitFor
* The previous operation to wait for
* @param volumeDescriptors
* All the volume descriptors
* @param taskId
* The current task id
* @return The previous operation id
*/
private String postRPChangeVpoolSteps(Workflow workflow, String waitFor, List<VolumeDescriptor> volumeDescriptors, String taskId) {
// Get the list of descriptors needed for post change virtual pool operations on RP.
List<VolumeDescriptor> rpVolumeDescriptors = VolumeDescriptor.filterByType(volumeDescriptors, new VolumeDescriptor.Type[] { VolumeDescriptor.Type.RP_EXISTING_SOURCE }, null);
// If no volume descriptors match, just return
if (rpVolumeDescriptors.isEmpty()) {
return waitFor;
}
List<VolumeDescriptor> migratedBlockDataDescriptors = new ArrayList<VolumeDescriptor>();
// We could be performing a change vpool for RP+VPLEX / MetroPoint. This means
// we could potentially have migrations that need to be done on the backend
// volumes. If migration info exists we need to collect that ahead of time.
List<URI> volumesWithMigration = new ArrayList<URI>();
if (volumeDescriptors != null) {
List<VolumeDescriptor> migrateDescriptors = VolumeDescriptor.filterByType(volumeDescriptors, new VolumeDescriptor.Type[] { VolumeDescriptor.Type.VPLEX_MIGRATE_VOLUME }, null);
if (migrateDescriptors != null && !migrateDescriptors.isEmpty()) {
s_logger.info("Data Migration detected, this is due to a change virtual pool operation on RP+VPLEX or MetroPoint.");
// Load the migration objects for use later
Iterator<VolumeDescriptor> migrationIter = migrateDescriptors.iterator();
while (migrationIter.hasNext()) {
VolumeDescriptor migrationDesc = migrationIter.next();
Migration migration = s_dbClient.queryObject(Migration.class, migrationDesc.getMigrationId());
volumesWithMigration.add(migration.getSource());
Volume migratedVolume = s_dbClient.queryObject(Volume.class, migration.getVolume());
VolumeDescriptor migratedBlockDataDesc = new VolumeDescriptor(VolumeDescriptor.Type.BLOCK_DATA, migratedVolume.getStorageController(), migratedVolume.getId(), null, migratedVolume.getConsistencyGroup(), migrationDesc.getCapabilitiesValues());
migratedBlockDataDescriptors.add(migratedBlockDataDesc);
}
}
}
List<VolumeDescriptor> blockDataDescriptors = new ArrayList<VolumeDescriptor>();
for (VolumeDescriptor descr : rpVolumeDescriptors) {
// If there are RP_EXISTING_SOURCE volume descriptors, we need to ensure the
// existing volumes are added to their native CGs for the change vpool request.
// Before any existing resource can be protected by RP they have to be removed
// from their existing CGs but now will need to be added to the new CG needed
// for RecoverPoint protection.
// NOTE: Only relevant for RP+VPLEX and MetroPoint. Regular RP does not enforce local
// array CGs.
Volume rpExistingSource = s_dbClient.queryObject(Volume.class, descr.getVolumeURI());
// Only VPLEX volumes (i.e., volumes with associated backing volumes) need these post steps.
if (RPHelper.isVPlexVolume(rpExistingSource, s_dbClient)) {
s_logger.info(String.format("Adding post RP Change Vpool steps for existing VPLEX source volume [%s].", rpExistingSource.getLabel()));
// The backing volumes should be populated for a VPLEX volume; guard against null regardless.
if (null != rpExistingSource.getAssociatedVolumes()) {
for (String assocVolumeId : rpExistingSource.getAssociatedVolumes()) {
Volume assocVolume = s_dbClient.queryObject(Volume.class, URI.create(assocVolumeId));
// If a migration exists for this backing volume, it will eventually be deleted, so let's skip it.
if (volumesWithMigration.contains(assocVolume.getId())) {
s_logger.info(String.format("Migration exists for [%s] so no need to add this volume to a backing array CG.", assocVolume.getLabel()));
continue;
}
// Only create a backend CG step if the volume's replication group instance field has been populated during the API prepare volume steps.
if (NullColumnValueGetter.isNotNullValue(assocVolume.getReplicationGroupInstance())) {
// Create the BLOCK_DATA descriptor with the correct info
// for creating the CG and adding the backing volume to it.
VolumeDescriptor blockDataDesc = new VolumeDescriptor(VolumeDescriptor.Type.BLOCK_DATA, assocVolume.getStorageController(), assocVolume.getId(), null, rpExistingSource.getConsistencyGroup(), descr.getCapabilitiesValues());
blockDataDescriptors.add(blockDataDesc);
// Good time to update the backing volume with its new CG
assocVolume.setConsistencyGroup(rpExistingSource.getConsistencyGroup());
s_dbClient.updateObject(assocVolume);
s_logger.info(String.format("Backing volume [%s] needs to be added to CG [%s] on storage system [%s].", assocVolume.getLabel(), rpExistingSource.getConsistencyGroup(), assocVolume.getStorageController()));
}
}
}
}
}
if (!blockDataDescriptors.isEmpty()) {
// Add a step to create the local array consistency group
waitFor = _blockDeviceController.addStepsForCreateConsistencyGroup(workflow, waitFor, blockDataDescriptors, "postRPChangeVpoolCreateCG");
// Add a step to update the local array consistency group with the volumes to add
waitFor = _blockDeviceController.addStepsForUpdateConsistencyGroup(workflow, waitFor, blockDataDescriptors, null);
}
// Consolidate all the block data descriptors to see if any replica steps are needed.
blockDataDescriptors.addAll(migratedBlockDataDescriptors);
s_logger.info("Checking for Replica steps");
// Call the ReplicaDeviceController to add its methods if volumes are added to a CG and the CG is associated with replication group(s).
waitFor = _replicaDeviceController.addStepsForCreateVolumes(workflow, waitFor, blockDataDescriptors, taskId);
return waitFor;
}
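The waitFor chaining at the end of this method is the central Workflow pattern in the snippet: each addStepsFor* call registers its steps to run after the step keyed by waitFor and returns the key of its last step. A minimal sketch of the pattern, reusing the names from the snippet above (previousStepKey is illustrative):
String waitFor = previousStepKey;
// Steps added here run only after 'previousStepKey' completes.
waitFor = _blockDeviceController.addStepsForCreateConsistencyGroup(workflow, waitFor, blockDataDescriptors, "postRPChangeVpoolCreateCG");
// These steps wait for the CG creation steps above.
waitFor = _blockDeviceController.addStepsForUpdateConsistencyGroup(workflow, waitFor, blockDataDescriptors, null);
// Replica steps run last; the returned key lets callers chain further steps.
waitFor = _replicaDeviceController.addStepsForCreateVolumes(workflow, waitFor, blockDataDescriptors, taskId);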
Use of com.emc.storageos.db.client.model.Migration in project coprhd-controller by CoprHD.
The class VPlexBlockServiceApiImpl, method prepareMigration.
/**
* Prepares a migration for the passed virtual volume specifying the source
* and target volumes for the migration.
*
* @param virtualVolumeURI The URI of the virtual volume.
* @param sourceURI The URI of the source volume for the migration.
* @param targetURI The URI of the target volume for the migration.
* @param token The task identifier.
*
* @return A reference to a newly created Migration.
*/
public Migration prepareMigration(URI virtualVolumeURI, URI sourceURI, URI targetURI, String token) {
Migration migration = new Migration();
migration.setId(URIUtil.createId(Migration.class));
migration.setVolume(virtualVolumeURI);
migration.setSource(sourceURI);
migration.setTarget(targetURI);
_dbClient.createObject(migration);
migration.setOpStatus(new OpStatusMap());
Operation op = _dbClient.createTaskOpStatus(Migration.class, migration.getId(), token, ResourceOperationTypeEnum.MIGRATE_BLOCK_VOLUME);
migration.getOpStatus().put(token, op);
_dbClient.updateObject(migration);
return migration;
}
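A hedged usage sketch: the returned Migration is already persisted, and its operation status map holds a pending MIGRATE_BLOCK_VOLUME operation keyed by the passed token (the variable names here are illustrative):
Migration migration = prepareMigration(virtualVolumeURI, sourceBackendVolumeURI, targetBackendVolumeURI, taskId);
// The Migration row now exists in the database.
Operation pendingOp = migration.getOpStatus().get(taskId);
// pendingOp tracks the MIGRATE_BLOCK_VOLUME task until the orchestration completes it.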
Use of com.emc.storageos.db.client.model.Migration in project coprhd-controller by CoprHD.
The class VPlexBlockServiceApiImpl, method createBackendVolumeMigrationDescriptors.
/**
* Does the work necessary to prepare the passed backend volume for the
* passed virtual volume to be migrated to a new volume with a new VirtualPool.
*
* @param vplexSystem A reference to the Vplex storage system
* @param virtualVolume A reference to the virtual volume.
* @param sourceVolume A reference to the backend volume to be migrated.
* @param varray A reference to the varray for the backend volume.
* @param vpool A reference to the VirtualPool for the new volume.
* @param capacity The capacity for the migration target.
* @param taskId The task identifier.
* @param recommendations The placement recommendations, or null/empty to compute them here.
* @param isHA true when creating descriptors for the HA side of the virtual volume.
* @param capabilities The virtual pool capabilities, or null to build a default wrapper.
*
* @return The volume descriptors for the new target volume and its migration.
*/
private List<VolumeDescriptor> createBackendVolumeMigrationDescriptors(StorageSystem vplexSystem, Volume virtualVolume, Volume sourceVolume, VirtualArray varray, VirtualPool vpool, Long capacity, String taskId, List<Recommendation> recommendations, boolean isHA, VirtualPoolCapabilityValuesWrapper capabilities) {
// If we know the backend source volume, the new backend volume
// will have the same label and project. Otherwise, the volume
// must be ingested and we know nothing about the backend volume.
// Therefore, we create the label based on the name of the VPLEX
// volume and determine the project in a manner similar to a
// volume creation.
URI sourceVolumeURI = null;
Project targetProject = null;
String targetLabel = null;
if (sourceVolume != null) {
// Since we know the source volume, this is not an ingested
// VPLEX volume that is being migrated. Ideally we would just
// give the new backend volume the same name as the current
// i.e., source. However, this is a problem if the migration
// is on the same array. We can't have two volumes with the
// same name. Eventually the source will go away, but not until
// after the migration is completed. The backend volume names
// are basically irrelevant, but we still want them tied to
// the VPLEX volume name.
//
// When initially created, the names are <vvolname>-0 or
// <vvolname>-1, depending upon if it is the source side
// backend volume or HA side backend volume. The volume may
// also have an additional suffix of "-<1...N>" if the
// VPLEX volume was created as part of a multi-volume creation
// request, where N was the number of volumes requested. When
// a volume is first migrated, we will append a "m" to the current
// source volume name to ensure name uniqueness. If the volume
// happens to be migrated again, we'll remove the extra character.
// We'll go back forth in this manner for each migration of that
// backend volume.
sourceVolumeURI = sourceVolume.getId();
targetProject = _dbClient.queryObject(Project.class, sourceVolume.getProject().getURI());
targetLabel = sourceVolume.getLabel();
if (!targetLabel.endsWith(MIGRATION_LABEL_SUFFIX)) {
targetLabel += MIGRATION_LABEL_SUFFIX;
} else {
targetLabel = targetLabel.substring(0, targetLabel.length() - 1);
}
} else {
targetProject = getVplexProject(vplexSystem, _dbClient, _tenantsService);
targetLabel = virtualVolume.getLabel();
if (virtualVolume.getVirtualArray().equals(varray.getId())) {
targetLabel += SRC_BACKEND_VOL_LABEL_SUFFIX;
} else {
targetLabel += HA_BACKEND_VOL_LABEL_SUFFIX;
}
}
// Get the recommendation for this volume placement.
Set<URI> requestedVPlexSystems = new HashSet<URI>();
requestedVPlexSystems.add(vplexSystem.getId());
URI cgURI = null;
// Check to see if the VirtualPoolCapabilityValuesWrapper has been passed in; if not, create a new one.
if (capabilities != null) {
// The consistency group or null when not specified.
final BlockConsistencyGroup consistencyGroup = capabilities.getBlockConsistencyGroup() == null ? null : _dbClient.queryObject(BlockConsistencyGroup.class, capabilities.getBlockConsistencyGroup());
// Use the CG only if it has not yet been created or was created with the LOCAL type; otherwise we don't want a volume creation to result in backend CGs.
if ((consistencyGroup != null) && ((!consistencyGroup.created()) || (consistencyGroup.getTypes().contains(Types.LOCAL.toString())))) {
cgURI = consistencyGroup.getId();
}
} else {
capabilities = new VirtualPoolCapabilityValuesWrapper();
capabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, capacity);
capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, Integer.valueOf(1));
}
boolean premadeRecs = false;
if (recommendations == null || recommendations.isEmpty()) {
recommendations = getBlockScheduler().scheduleStorage(varray, requestedVPlexSystems, null, vpool, false, null, null, capabilities, targetProject, VpoolUse.ROOT, new HashMap<VpoolUse, List<Recommendation>>());
if (recommendations.isEmpty()) {
throw APIException.badRequests.noStorageFoundForVolumeMigration(vpool.getLabel(), varray.getLabel(), sourceVolumeURI);
}
s_logger.info("Got recommendation");
} else {
premadeRecs = true;
}
// If we have premade recommendations passed in and this is trying to create descriptors for HA
// then the HA rec will be at index 1 instead of index 0. Default case is index 0.
int recIndex = (premadeRecs && isHA) ? 1 : 0;
// Create a volume for the new backend volume to which
// data will be migrated.
URI targetStorageSystem = recommendations.get(recIndex).getSourceStorageSystem();
URI targetStoragePool = recommendations.get(recIndex).getSourceStoragePool();
Volume targetVolume = prepareVolumeForRequest(capacity, targetProject, varray, vpool, targetStorageSystem, targetStoragePool, targetLabel, ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME, taskId, _dbClient);
// If the cgURI is null, try and get it from the source volume.
if (cgURI == null) {
if ((sourceVolume != null) && (!NullColumnValueGetter.isNullURI(sourceVolume.getConsistencyGroup()))) {
cgURI = sourceVolume.getConsistencyGroup();
targetVolume.setConsistencyGroup(cgURI);
}
}
if ((sourceVolume != null) && NullColumnValueGetter.isNotNullValue(sourceVolume.getReplicationGroupInstance())) {
targetVolume.setReplicationGroupInstance(sourceVolume.getReplicationGroupInstance());
}
targetVolume.addInternalFlags(Flag.INTERNAL_OBJECT);
_dbClient.updateObject(targetVolume);
s_logger.info("Prepared volume {}", targetVolume.getId());
// Add the volume to the passed new volumes list and pool
// volume map.
URI targetVolumeURI = targetVolume.getId();
List<VolumeDescriptor> descriptors = new ArrayList<VolumeDescriptor>();
descriptors.add(new VolumeDescriptor(VolumeDescriptor.Type.BLOCK_DATA, targetStorageSystem, targetVolumeURI, targetStoragePool, cgURI, capabilities, capacity));
// Create a migration to represent the migration of data
// from the backend volume to the new backend volume for the
// passed virtual volume and add the migration to the passed
// migrations list.
Migration migration = prepareMigration(virtualVolume.getId(), sourceVolumeURI, targetVolumeURI, taskId);
descriptors.add(new VolumeDescriptor(VolumeDescriptor.Type.VPLEX_MIGRATE_VOLUME, targetStorageSystem, targetVolumeURI, targetStoragePool, cgURI, migration.getId(), capabilities));
printMigrationInfo(migration, sourceVolume, targetVolume);
return descriptors;
}
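The label toggling described in the long comment above alternates a one-character suffix to keep backend volume names unique across repeated migrations. A sketch of that rule in isolation, assuming MIGRATION_LABEL_SUFFIX is a single character such as "m" (the substring call, as in the original, strips exactly one character):
private static String toggleMigrationLabel(String label) {
    if (!label.endsWith(MIGRATION_LABEL_SUFFIX)) {
        // First migration: append the suffix for uniqueness.
        return label + MIGRATION_LABEL_SUFFIX;
    }
    // Subsequent migration: strip the suffix again, alternating each time.
    return label.substring(0, label.length() - 1);
}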
Use of com.emc.storageos.db.client.model.Migration in project coprhd-controller by CoprHD.
The class VPlexBlockServiceApiImpl, method prepareBackendVolumeForMigration.
/**
* Deprecated: use createBackendVolumeMigrationDescriptors instead for use in
* BlockOrchestration.
*
* Does the work necessary to prepare the passed backend volume for the
* passed virtual volume to be migrated to a new volume with a new VirtualPool.
*
* @param vplexSystem A reference to the Vplex storage system
* @param virtualVolume A reference to the virtual volume.
* @param sourceVolume A reference to the backend volume to be migrated.
* @param varray A reference to the varray for the backend volume.
* @param vpool A reference to the VirtualPool for the new volume.
* @param capacity The capacity for the migration target.
* @param taskId The task identifier.
* @param newVolumes An OUT parameter to which the new volume is added.
* @param migrationMap An OUT parameter to which the new migration is added.
* @param poolVolumeMap An OUT parameter associating the new Volume to the
* storage pool in which it will be created.
*/
@Deprecated
private void prepareBackendVolumeForMigration(StorageSystem vplexSystem, Volume virtualVolume, Volume sourceVolume, VirtualArray varray, VirtualPool vpool, Long capacity, String taskId, List<URI> newVolumes, Map<URI, URI> migrationMap, Map<URI, URI> poolVolumeMap) {
URI sourceVolumeURI = null;
Project targetProject = null;
String targetLabel = null;
if (sourceVolume != null) {
// Since we know the source volume, this is not an ingested
// VPLEX volume that is being migrated. Ideally we would just
// give the new backend volume the same name as the current
// i.e., source. However, this is a problem if the migration
// is on the same array. We can't have two volumes with the
// same name. Eventually the source will go away, but not until
// after the migration is completed. The backend volume names
// are basically irrelevant, but we still want them tied to
// the VPLEX volume name.
//
// When initially created, the names are <vvolname>-0 or
// <vvolname>-1, depending upon if it is the source side
// backend volume or HA side backend volume. The volume may
// also have an additional suffix of "-<1...N>" if the
// VPLEX volume was created as part of a multi-volume creation
// request, where N was the number of volumes requested. When
// a volume is first migrated, we will append a "m" to the current
// source volume name to ensure name uniqueness. If the volume
// happens to be migrated again, we'll remove the extra character.
// We'll go back forth in this manner for each migration of that
// backend volume.
sourceVolumeURI = sourceVolume.getId();
targetProject = _dbClient.queryObject(Project.class, sourceVolume.getProject().getURI());
targetLabel = sourceVolume.getLabel();
if (!targetLabel.endsWith(MIGRATION_LABEL_SUFFIX)) {
targetLabel += MIGRATION_LABEL_SUFFIX;
} else {
targetLabel = targetLabel.substring(0, targetLabel.length() - 1);
}
} else {
// The VPLEX volume must be ingested and now the backend
// volume(s) are being migrated. We have no idea what the
// source volume name is. Therefore, we can just give
// them initial extensions. It is highly unlikely that
// they will have the same naming conventions for their
// backend volumes.
targetProject = getVplexProject(vplexSystem, _dbClient, _tenantsService);
targetLabel = virtualVolume.getLabel();
if (virtualVolume.getVirtualArray().equals(varray.getId())) {
targetLabel += SRC_BACKEND_VOL_LABEL_SUFFIX;
} else {
targetLabel += HA_BACKEND_VOL_LABEL_SUFFIX;
}
}
// Get the consistency group from the source volume if we know the source. The null
// check on sourceVolume guards against an NPE for ingested volumes, where it is null.
URI cgURI = null;
if ((sourceVolume != null) && !NullColumnValueGetter.isNullURI(sourceVolume.getConsistencyGroup())) {
cgURI = sourceVolume.getConsistencyGroup();
}
// Get the recommendation for this volume placement.
Set<URI> requestedVPlexSystems = new HashSet<URI>();
requestedVPlexSystems.add(vplexSystem.getId());
VirtualPoolCapabilityValuesWrapper cosWrapper = new VirtualPoolCapabilityValuesWrapper();
cosWrapper.put(VirtualPoolCapabilityValuesWrapper.SIZE, capacity);
cosWrapper.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, Integer.valueOf(1));
if (cgURI != null) {
cosWrapper.put(VirtualPoolCapabilityValuesWrapper.BLOCK_CONSISTENCY_GROUP, cgURI);
}
List<Recommendation> recommendations = getBlockScheduler().scheduleStorage(varray, requestedVPlexSystems, null, vpool, false, null, null, cosWrapper, targetProject, VpoolUse.ROOT, new HashMap<VpoolUse, List<Recommendation>>());
if (recommendations.isEmpty()) {
throw APIException.badRequests.noStorageFoundForVolumeMigration(vpool.getLabel(), varray.getLabel(), sourceVolumeURI);
}
s_logger.info("Got recommendation");
// Create a volume for the new backend volume to which
// data will be migrated.
URI targetStorageSystem = recommendations.get(0).getSourceStorageSystem();
URI targetStoragePool = recommendations.get(0).getSourceStoragePool();
Volume targetVolume = prepareVolumeForRequest(capacity, targetProject, varray, vpool, targetStorageSystem, targetStoragePool, targetLabel, ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME, taskId, _dbClient);
if (cgURI != null) {
targetVolume.setConsistencyGroup(cgURI);
}
targetVolume.addInternalFlags(Flag.INTERNAL_OBJECT);
_dbClient.updateObject(targetVolume);
s_logger.info("Prepared volume {}", targetVolume.getId());
// Add the volume to the passed new volumes list and pool
// volume map.
URI targetVolumeURI = targetVolume.getId();
newVolumes.add(targetVolumeURI);
poolVolumeMap.put(targetStoragePool, targetVolumeURI);
// Create a migration to represent the migration of data
// from the backend volume to the new backend volume for the
// passed virtual volume and add the migration to the passed
// migrations list.
Migration migration = prepareMigration(virtualVolume.getId(), sourceVolumeURI, targetVolumeURI, taskId);
migrationMap.put(targetVolumeURI, migration.getId());
s_logger.info("Prepared migration {}.", migration.getId());
}
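For reference, a caller of this deprecated variant pre-allocates the OUT collections and reads the results back afterwards; a minimal sketch with illustrative local variables:
List<URI> newVolumes = new ArrayList<URI>();
Map<URI, URI> migrationMap = new HashMap<URI, URI>();
Map<URI, URI> poolVolumeMap = new HashMap<URI, URI>();
prepareBackendVolumeForMigration(vplexSystem, virtualVolume, sourceVolume, varray, vpool, capacity, taskId, newVolumes, migrationMap, poolVolumeMap);
// migrationMap now maps each new target volume URI to the URI of its Migration;
// poolVolumeMap maps the chosen storage pool to the new target volume.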
Use of com.emc.storageos.db.client.model.Migration in project coprhd-controller by CoprHD.
The class RPBlockServiceApiImpl, method computeProtectionCapacity.
/**
* This method computes a matching volume allocation capacity across all protection
* arrays. Some storage systems will allocate a slightly larger capacity than
* requested so volume sizes can become inconsistent between source and target.
* <p>
* If we are protecting between different array types, we need to determine the actual allocation size on each array. Set the capacity
* of the source and target volumes to be the larger of the actual allocation sizes. This is done to ensure the size of the source and
* target volumes are identical so RP can create the CG properly.
*
* This method returns the size of the volume to be created taking into account the above considerations.
*
* @param volumeURIs
* The URIs of the volumes to compare
* @param requestedSize
* Requested size of the volume to be created or expanded
* @param isExpand
* Expand or Create volume operation
* @param isChangeVpool
* Whether this is a change virtual pool operation
* @param volumeDescriptors
* All volume descriptors, used to detect pending migrations
* @return the final capacity used
*/
protected Long computeProtectionCapacity(List<URI> volumeURIs, Long requestedSize, boolean isExpand, boolean isChangeVpool, List<VolumeDescriptor> volumeDescriptors) {
List<Volume> volumes = _dbClient.queryObject(Volume.class, volumeURIs);
_log.info("Performing checks to see if all volumes are of the same System Type and capacity for Protection.");
Map<URI, StorageSystem> volumeStorageSystemMap = new HashMap<URI, StorageSystem>();
List<Volume> allVolumesToCompare = new ArrayList<Volume>();
List<Volume> allVolumesToUpdateCapacity = new ArrayList<Volume>();
List<Long> currentVolumeSizes = new ArrayList<Long>();
Map<URI, String> associatedVolumePersonalityMap = new HashMap<URI, String>();
Long capacity = 0L;
// We could be performing a change vpool for RP+VPLEX / MetroPoint. This means
// we could potentially have migrations that need to be done on the backend
// volumes. If migration info exists we need to collect that ahead of time.
List<VolumeDescriptor> migrateDescriptors = null;
List<Migration> migrations = null;
if (volumeDescriptors != null) {
migrateDescriptors = VolumeDescriptor.filterByType(volumeDescriptors, new VolumeDescriptor.Type[] { VolumeDescriptor.Type.VPLEX_MIGRATE_VOLUME }, null);
if (migrateDescriptors != null && !migrateDescriptors.isEmpty()) {
_log.info("Data Migration detected, this is due to a change virtual pool operation on RP+VPLEX or MetroPoint.");
// Load the migration objects for use later
migrations = new ArrayList<Migration>();
Iterator<VolumeDescriptor> migrationIter = migrateDescriptors.iterator();
while (migrationIter.hasNext()) {
Migration migration = _dbClient.queryObject(Migration.class, migrationIter.next().getMigrationId());
migrations.add(migration);
}
}
}
for (Volume volume : volumes) {
// Only compare SOURCE and TARGET volumes; for VPLEX virtual volumes we drill down to the associated volumes as those are the real backing volumes.
if (volume.getPersonality() != null) {
if (volume.getPersonality().equals(Volume.PersonalityTypes.SOURCE.toString()) || volume.getPersonality().equals(Volume.PersonalityTypes.TARGET.toString())) {
allVolumesToUpdateCapacity.add(volume);
_log.info("Adding Volume [{}] to potentially have capacity adjusted.", volume.getLabel());
// If there are associated volumes, this must be a Virtual Volume for RP+VPLEX
// and we also need to load those associated/backend volumes for comparison
StringSet associatedVolumes = volume.getAssociatedVolumes();
if (associatedVolumes != null && !associatedVolumes.isEmpty()) {
_log.info("Volume [{}] is a VPLEX virtual volume.", volume.getLabel());
Iterator<String> it = associatedVolumes.iterator();
while (it.hasNext()) {
URI associatedVolumeURI = URI.create(it.next());
Volume associatedVolume = _dbClient.queryObject(Volume.class, associatedVolumeURI);
// Check to see if there is a migration for this backend volume
if (migrations != null && !migrations.isEmpty()) {
for (Migration migration : migrations) {
if (migration.getSource().equals(associatedVolume.getId())) {
_log.info("VPLEX backing volume [{}] has a migration, using migration volume instead.", associatedVolume.getLabel());
// Use the migration volume instead for the capacity adjustment check
associatedVolume = _dbClient.queryObject(Volume.class, migration.getTarget());
break;
}
}
}
// Use the provisioned capacity if it has been set, otherwise use the requested capacity
if (associatedVolume.getProvisionedCapacity().longValue() > 0) {
currentVolumeSizes.add(associatedVolume.getProvisionedCapacity());
} else {
currentVolumeSizes.add(associatedVolume.getCapacity());
}
addVolumeStorageSystem(volumeStorageSystemMap, associatedVolume);
allVolumesToCompare.add(associatedVolume);
allVolumesToUpdateCapacity.add(associatedVolume);
associatedVolumePersonalityMap.put(associatedVolume.getId(), volume.getPersonality());
_log.info("Adding Volume [{}] to potentially have capacity adjusted.", associatedVolume.getLabel());
}
} else {
// Not a VPLEX Virtual Volume, the volume itself can be used.
_log.info("Volume [{}] is not VPLEX virtual volume.", volume.getLabel());
// Use the provisioned capacity if it has been set, otherwise use the requested capacity
if (volume.getProvisionedCapacity().longValue() > 0) {
currentVolumeSizes.add(volume.getProvisionedCapacity());
} else {
currentVolumeSizes.add(volume.getCapacity());
}
addVolumeStorageSystem(volumeStorageSystemMap, volume);
allVolumesToCompare.add(volume);
}
}
} else {
_log.warn("Volume [{}] does not have PERSONALITY set. We will not be able to compare this volume.", volume.getLabel());
}
}
// Flag to indicate that there are VMAX2 and VMAX3 storage
// systems in the request. Special handling will be required.
boolean vmax2Vmax3StorageCombo = false;
// There should be at least 2 volumes to compare, Source and Target (if not more)
if (!allVolumesToCompare.isEmpty() && (allVolumesToCompare.size() >= 2)) {
StorageSystem storageSystem = null;
StorageSystem storageSystemToCompare = null;
boolean storageSystemsMatch = true;
// Compare the storage systems of all volumes to see if they match based on system type.
for (Map.Entry<URI, StorageSystem> volumeStorageSystemEntry : volumeStorageSystemMap.entrySet()) {
URI volUri = volumeStorageSystemEntry.getKey();
if (storageSystemToCompare == null) {
// Find a base for the comparison, the first element will do.
storageSystemToCompare = volumeStorageSystemMap.get(volUri);
// set storageSystem to first element if there is only one
if (volumeStorageSystemMap.size() == 1) {
storageSystem = volumeStorageSystemMap.get(volUri);
}
continue;
}
storageSystem = volumeStorageSystemMap.get(volUri);
vmax2Vmax3StorageCombo = checkVMAX2toVMAX3(storageSystemToCompare, storageSystem);
if (!storageSystemToCompare.getSystemType().equals(storageSystem.getSystemType()) || vmax2Vmax3StorageCombo) {
// The storage systems do not all match so we need to determine the allocated
// capacity on each system.
storageSystemsMatch = false;
break;
}
}
// Special case for change vpool when all volumes reside on XtremIO: reuse the existing source volume's provisioned capacity so all capacities match.
if (storageSystemsMatch && isChangeVpool && DiscoveredDataObject.Type.xtremio.name().equals(storageSystem.getSystemType())) {
for (Volume volume : allVolumesToUpdateCapacity) {
if (NullColumnValueGetter.isNotNullValue(volume.getPersonality()) && volume.getPersonality().equals(Volume.PersonalityTypes.SOURCE.toString())) {
capacity = volume.getProvisionedCapacity();
break;
}
}
for (Volume volume : allVolumesToUpdateCapacity) {
updateVolumeCapacity(volume, capacity, isExpand);
}
_log.info(String.format("Capacity adjustments made for XIO change vpool operation."));
return capacity;
}
// If the storage systems and all volume sizes already match, we don't do this.
if (!storageSystemsMatch || !allVolumeSizesMatch(currentVolumeSizes)) {
// The storage systems are not all the same so now we must find a volume allocation size
// that matches between all arrays.
_log.warn("The storage systems for all volumes do not match or all volume sizes do not match. " + "This could cause RP CG creation to fail. " + "Potentially need to adjust capacity size of volumes to be consistent across all source/targets.");
List<Volume> tempVolumesList = new ArrayList<Volume>();
Long currentVolumeCapacity = 0L;
Long volumeToCompareCapacity = 0L;
boolean matched = true;
Long capacityToUseInCalculation = Collections.max(currentVolumeSizes);
if (isExpand) {
capacityToUseInCalculation = requestedSize;
}
_log.info(String.format("The capacity to match to is [%s]", capacityToUseInCalculation.toString()));
// Check for storage systems which cannot allocate storage at the exact same amount; if so, set unmatched capacities.
if (!capacitiesCanMatch(volumeStorageSystemMap) || vmax2Vmax3StorageCombo) {
setUnMatchedCapacities(allVolumesToUpdateCapacity, associatedVolumePersonalityMap, isExpand, capacityToUseInCalculation);
} else {
for (int index = 0; index < allVolumesToCompare.size(); index++) {
matched = true;
tempVolumesList.clear();
tempVolumesList.addAll(allVolumesToCompare);
// Remove the current volume from the list and get a handle on it
Volume currentVolume = tempVolumesList.remove(index);
StorageSystem currentVolumeStorageSystem = _dbClient.queryObject(StorageSystem.class, currentVolume.getStorageController());
// Get the System Type for the current volume
String currentVolumeSystemType = volumeStorageSystemMap.get(currentVolume.getStorageController()).getSystemType();
// Calculate the capacity for the current volume based on the Storage System type to see if it can be adjusted
currentVolumeCapacity = capacityCalculatorFactory.getCapacityCalculator(currentVolumeSystemType).calculateAllocatedCapacity(capacityToUseInCalculation, currentVolume, _dbClient);
_log.info(String.format("Volume [%s] has a capacity of %s on storage system type %s. " + "The calculated capacity for this volume is %s.", currentVolume.getLabel(), currentVolume.getCapacity(), currentVolumeSystemType, currentVolumeCapacity));
// Compare the calculated capacity against the remaining volumes to see if the capacities will match.
for (Volume volumeToCompare : tempVolumesList) {
// Get the System Type for the volume to compare
String volumeToCompareSystemType = volumeStorageSystemMap.get(volumeToCompare.getStorageController()).getSystemType();
// If the volume to compare is on the same system type, the calculation would be identical; we don't want to adjust the capacity again, so just skip it.
if (volumeToCompareSystemType.equalsIgnoreCase(currentVolumeSystemType)) {
continue;
}
StorageSystem volumeToCompareStorageSystem = _dbClient.queryObject(StorageSystem.class, volumeToCompare.getStorageController());
// Calculate the capacity for the volume to compare based on the Storage System type to see if it can be
// adjusted
volumeToCompareCapacity = capacityCalculatorFactory.getCapacityCalculator(volumeToCompareSystemType).calculateAllocatedCapacity(currentVolumeCapacity, volumeToCompare, _dbClient);
// Check to see if the capacities match
if (!currentVolumeCapacity.equals(volumeToCompareCapacity)) {
// If the capacities don't match, we can not use this capacity across all volumes
// so we will have to check the next volume. Break out of this loop and warn the user.
_log.warn(String.format("Storage System %s is not capable of allocating exactly %s bytes for volume [%s], keep trying...", volumeToCompareSystemType, currentVolumeCapacity, volumeToCompare.getLabel()));
matched = false;
break;
} else {
_log.info(String.format("Volume [%s] is capable of being provisioned at %s bytes on storage system of type %s, continue...", volumeToCompare.getLabel(), currentVolumeCapacity, volumeToCompareSystemType));
}
}
// If all volume capacities match, we have a winner.
if (matched) {
break;
}
}
if (matched) {
// We have found capacity that is consistent across all Storage Systems
capacity = currentVolumeCapacity;
_log.info("Found a capacity size that is consistent across all source/target(s) storage systems: " + capacity);
for (Volume volume : allVolumesToUpdateCapacity) {
if (isChangeVpool && NullColumnValueGetter.isNotNullValue(volume.getPersonality()) && volume.getPersonality().equals(Volume.PersonalityTypes.SOURCE.toString())) {
// Don't update the existing source if this is a change vpool
continue;
}
updateVolumeCapacity(volume, capacity, isExpand);
}
} else {
// Circumvent CG creation, which would fail, by throwing an exception here.
throw APIException.internalServerErrors.noMatchingAllocationCapacityFound();
}
}
} else {
_log.info(String.format("All storage systems match and/or all volume sizes are consistent. No need for any capacity adjustments."));
capacity = requestedSize;
}
} else {
_log.error("There were no volumes found to compare capacities.");
}
return capacity;
}
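The early size-consistency check relies on allVolumeSizesMatch; its expected behavior, sketched here as an assumption rather than the project's actual implementation, is that the size list holds at most one distinct value:
private static boolean allVolumeSizesMatch(List<Long> currentVolumeSizes) {
    // All sizes match when the list collapses to a single distinct value.
    return new HashSet<Long>(currentVolumeSizes).size() <= 1;
}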