use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.
the class RPBlockServiceApiImpl method logDescriptors.
/**
* Log the output from the descriptors created
*
* @param descriptors All descriptors to log
*/
private void logDescriptors(List<VolumeDescriptor> descriptors) {
StringBuffer buf = new StringBuffer();
buf.append(String.format(NEW_LINE));
buf.append(String.format("Volume descriptors for RP: %n"));
for (VolumeDescriptor desc : descriptors) {
Volume volume = _dbClient.queryObject(Volume.class, desc.getVolumeURI());
buf.append(String.format("%n\t Volume Name: [%s] %n\t Descriptor Type: [%s] %n\t Full Descriptor Info: [%s] %n", volume.getLabel(), desc.getType(), desc.toString()));
}
buf.append(String.format(NEW_LINE));
_log.info(buf.toString());
}
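A minimal usage sketch (hypothetical, not part of the class): it assumes sourceVolume and targetVolume are Volume objects already loaded from the database, and it uses the five-argument VolumeDescriptor constructor that also appears in the DUMMY_MIGRATE handling further down this page.
// Build two descriptors and write a formatted summary of them to the service log.
List<VolumeDescriptor> descriptors = new ArrayList<VolumeDescriptor>();
descriptors.add(new VolumeDescriptor(VolumeDescriptor.Type.RP_SOURCE, sourceVolume.getStorageController(), sourceVolume.getId(), sourceVolume.getPool(), null));
descriptors.add(new VolumeDescriptor(VolumeDescriptor.Type.RP_TARGET, targetVolume.getStorageController(), targetVolume.getId(), targetVolume.getPool(), null));
logDescriptors(descriptors);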
use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.
the class RPBlockServiceApiImpl method computeProtectionCapacity.
/**
* This method computes a matching volume allocation capacity across all protection
* arrays. Some storage systems will allocate a slightly larger capacity than
* requested so volume sizes can become inconsistent between source and target.
* <p>
* If we are protecting between different array types, we need to determine the actual allocation size on each array. Set the capacity
* of the source and target volumes to be the larger of the actual allocation sizes. This is done to ensure the size of the source and
* target volumes are identical so RP can create the CG properly.
*
* This method returns the size of the volume to be created taking into account the above considerations.
*
* @param volumeURIs
* URIs of the volumes whose allocation capacities are compared
* @param requestedSize
* Requested size of the volume to be created or expanded
* @param isExpand
* true for an expand operation, false for a create operation
* @param isChangeVpool
* true if this request is part of a change virtual pool operation
* @param volumeDescriptors
* all volume descriptors for the request, used to detect pending VPLEX migrations
* @return the final capacity used
*/
protected Long computeProtectionCapacity(List<URI> volumeURIs, Long requestedSize, boolean isExpand, boolean isChangeVpool, List<VolumeDescriptor> volumeDescriptors) {
List<Volume> volumes = _dbClient.queryObject(Volume.class, volumeURIs);
_log.info("Performing checks to see if all volumes are of the same System Type and capacity for Protection.");
Map<URI, StorageSystem> volumeStorageSystemMap = new HashMap<URI, StorageSystem>();
List<Volume> allVolumesToCompare = new ArrayList<Volume>();
List<Volume> allVolumesToUpdateCapacity = new ArrayList<Volume>();
List<Long> currentVolumeSizes = new ArrayList<Long>();
Map<URI, String> associatedVolumePersonalityMap = new HashMap<URI, String>();
Long capacity = 0L;
// We could be performing a change vpool for RP+VPLEX / MetroPoint. This means
// we could potentially have migrations that need to be done on the backend
// volumes. If migration info exists we need to collect that ahead of time.
List<VolumeDescriptor> migrateDescriptors = null;
List<Migration> migrations = null;
if (volumeDescriptors != null) {
migrateDescriptors = VolumeDescriptor.filterByType(volumeDescriptors, new VolumeDescriptor.Type[] { VolumeDescriptor.Type.VPLEX_MIGRATE_VOLUME }, null);
if (migrateDescriptors != null && !migrateDescriptors.isEmpty()) {
_log.info("Data Migration detected, this is due to a change virtual pool operation on RP+VPLEX or MetroPoint.");
// Load the migration objects for use later
migrations = new ArrayList<Migration>();
Iterator<VolumeDescriptor> migrationIter = migrateDescriptors.iterator();
while (migrationIter.hasNext()) {
Migration migration = _dbClient.queryObject(Migration.class, migrationIter.next().getMigrationId());
migrations.add(migration);
}
}
}
for (Volume volume : volumes) {
// Only SOURCE and TARGET volumes are considered; for VPLEX virtual volumes the comparison is done against the associated/backend volumes as those are the real backing volumes.
if (volume.getPersonality() != null) {
if (volume.getPersonality().equals(Volume.PersonalityTypes.SOURCE.toString()) || volume.getPersonality().equals(Volume.PersonalityTypes.TARGET.toString())) {
allVolumesToUpdateCapacity.add(volume);
_log.info("Adding Volume [{}] to potentially have capacity adjusted.", volume.getLabel());
// If there are associated volumes, this must be a Virtual Volume for RP+VPLEX
// and we also need to load those associated/backend volumes for comparison
StringSet associatedVolumes = volume.getAssociatedVolumes();
if (associatedVolumes != null && !associatedVolumes.isEmpty()) {
_log.info("Volume [{}] is a VPLEX virtual volume.", volume.getLabel());
Iterator<String> it = associatedVolumes.iterator();
while (it.hasNext()) {
URI associatedVolumeURI = URI.create(it.next());
Volume associatedVolume = _dbClient.queryObject(Volume.class, associatedVolumeURI);
// Check to see if there is a migration for this backend volume
if (migrations != null && !migrations.isEmpty()) {
for (Migration migration : migrations) {
if (migration.getSource().equals(associatedVolume.getId())) {
_log.info("VPLEX backing volume [{}] has a migration, using migration volume instead.", associatedVolume.getLabel());
// Use the migration volume instead for the capacity adjustment check
associatedVolume = _dbClient.queryObject(Volume.class, migration.getTarget());
break;
}
}
}
// Use the provisioned capacity of the backend volume if it has been set, otherwise use the requested capacity
if (associatedVolume.getProvisionedCapacity().longValue() > 0) {
currentVolumeSizes.add(associatedVolume.getProvisionedCapacity());
} else {
currentVolumeSizes.add(associatedVolume.getCapacity());
}
addVolumeStorageSystem(volumeStorageSystemMap, associatedVolume);
allVolumesToCompare.add(associatedVolume);
allVolumesToUpdateCapacity.add(associatedVolume);
associatedVolumePersonalityMap.put(associatedVolume.getId(), volume.getPersonality());
_log.info("Adding Volume [{}] to potentially have capacity adjusted.", associatedVolume.getLabel());
}
} else {
// Not a VPLEX Virtual Volume, the volume itself can be used.
_log.info("Volume [{}] is not VPLEX virtual volume.", volume.getLabel());
// Use the provisioned capacity of the volume if it has been set, otherwise use the requested capacity
if (volume.getProvisionedCapacity().longValue() > 0) {
currentVolumeSizes.add(volume.getProvisionedCapacity());
} else {
currentVolumeSizes.add(volume.getCapacity());
}
addVolumeStorageSystem(volumeStorageSystemMap, volume);
allVolumesToCompare.add(volume);
}
}
} else {
_log.warn("Volume [{}] does not have PERSONALITY set. We will not be able to compare this volume.", volume.getLabel());
}
}
// Flag to indicate that there are VMAX2 and VMAX3 storage
// systems in the request. Special handling will be required.
boolean vmax2Vmax3StorageCombo = false;
// There should be at least 2 volumes to compare, Source and Target (if not more)
if (!allVolumesToCompare.isEmpty() && (allVolumesToCompare.size() >= 2)) {
StorageSystem storageSystem = null;
StorageSystem storageSystemToCompare = null;
boolean storageSystemsMatch = true;
// Determine whether all the storage systems involved are considered a match based on type, model, and firmware version.
for (Map.Entry<URI, StorageSystem> volumeStorageSystemEntry : volumeStorageSystemMap.entrySet()) {
URI volUri = volumeStorageSystemEntry.getKey();
if (storageSystemToCompare == null) {
// Find a base for the comparison, the first element will do.
storageSystemToCompare = volumeStorageSystemMap.get(volUri);
// set storageSystem to first element if there is only one
if (volumeStorageSystemMap.size() == 1) {
storageSystem = volumeStorageSystemMap.get(volUri);
}
continue;
}
storageSystem = volumeStorageSystemMap.get(volUri);
vmax2Vmax3StorageCombo = checkVMAX2toVMAX3(storageSystemToCompare, storageSystem);
if (!storageSystemToCompare.getSystemType().equals(storageSystem.getSystemType()) || vmax2Vmax3StorageCombo) {
// The storage systems do not all match so we need to determine the allocated
// capacity on each system.
storageSystemsMatch = false;
break;
}
}
// Special case: for a change vpool on XtremIO, reuse the existing source volume's provisioned capacity for all volumes so source and target capacities are guaranteed to match.
if (storageSystemsMatch && isChangeVpool && DiscoveredDataObject.Type.xtremio.name().equals(storageSystem.getSystemType())) {
for (Volume volume : allVolumesToUpdateCapacity) {
if (NullColumnValueGetter.isNotNullValue(volume.getPersonality()) && volume.getPersonality().equals(Volume.PersonalityTypes.SOURCE.toString())) {
capacity = volume.getProvisionedCapacity();
break;
}
}
for (Volume volume : allVolumesToUpdateCapacity) {
updateVolumeCapacity(volume, capacity, isExpand);
}
_log.info(String.format("Capacity adjustments made for XIO change vpool operation."));
return capacity;
}
// Capacity adjustments are only needed when the storage systems or the current volume sizes differ; when everything already matches we don't do this.
if (!storageSystemsMatch || !allVolumeSizesMatch(currentVolumeSizes)) {
// The storage systems are not all the same so now we must find a volume allocation size
// that matches between all arrays.
_log.warn("The storage systems for all volumes do not match or all volume sizes do not match. " + "This could cause RP CG creation to fail. " + "Potentially need to adjust capacity size of volumes to be consistent across all source/targets.");
List<Volume> tempVolumesList = new ArrayList<Volume>();
Long currentVolumeCapacity = 0L;
Long volumeToCompareCapacity = 0L;
boolean matched = true;
Long capacityToUseInCalculation = Collections.max(currentVolumeSizes);
if (isExpand) {
capacityToUseInCalculation = requestedSize;
}
_log.info(String.format("The capacity to match to is [%s]", capacityToUseInCalculation.toString()));
// Check for storage systems, or VMAX2/VMAX3 combinations, which cannot allocate storage at the exact same amount; in that case the capacities are simply set from the largest size.
if (!capacitiesCanMatch(volumeStorageSystemMap) || vmax2Vmax3StorageCombo) {
setUnMatchedCapacities(allVolumesToUpdateCapacity, associatedVolumePersonalityMap, isExpand, capacityToUseInCalculation);
} else {
for (int index = 0; index < allVolumesToCompare.size(); index++) {
matched = true;
tempVolumesList.clear();
tempVolumesList.addAll(allVolumesToCompare);
// Remove the current volume from the list and get a handle on it
Volume currentVolume = tempVolumesList.remove(index);
StorageSystem currentVolumeStorageSystem = _dbClient.queryObject(StorageSystem.class, currentVolume.getStorageController());
// Get the System Type for the current volume
String currentVolumeSystemType = volumeStorageSystemMap.get(currentVolume.getStorageController()).getSystemType();
// Calculate the capacity for the current volume based on the Storage System type to see if it can be adjusted
currentVolumeCapacity = capacityCalculatorFactory.getCapacityCalculator(currentVolumeSystemType).calculateAllocatedCapacity(capacityToUseInCalculation, currentVolume, _dbClient);
_log.info(String.format("Volume [%s] has a capacity of %s on storage system type %s. " + "The calculated capacity for this volume is %s.", currentVolume.getLabel(), currentVolume.getCapacity(), currentVolumeSystemType, currentVolumeCapacity));
// Compare the calculated capacity against the remaining volumes to see if the capacities will match.
for (Volume volumeToCompare : tempVolumesList) {
// Get the System Type for the volume to compare
String volumeToCompareSystemType = volumeStorageSystemMap.get(volumeToCompare.getStorageController()).getSystemType();
// If the volume to compare is on the same storage system type as the current volume, we don't want to adjust the capacity again, so just skip it.
if (volumeToCompareSystemType.equalsIgnoreCase(currentVolumeSystemType)) {
continue;
}
StorageSystem volumeToCompareStorageSystem = _dbClient.queryObject(StorageSystem.class, volumeToCompare.getStorageController());
// Calculate the capacity for the volume to compare based on the Storage System type to see if it can be
// adjusted
volumeToCompareCapacity = capacityCalculatorFactory.getCapacityCalculator(volumeToCompareSystemType).calculateAllocatedCapacity(currentVolumeCapacity, volumeToCompare, _dbClient);
// Check to see if the capacities match
if (!currentVolumeCapacity.equals(volumeToCompareCapacity)) {
// If the capacities don't match, we can not use this capacity across all volumes
// so we will have to check the next volume. Break out of this loop and warn the user.
_log.warn(String.format("Storage System %s is not capable of allocating exactly %s bytes for volume [%s], keep trying...", volumeToCompareSystemType, currentVolumeCapacity, volumeToCompare.getLabel()));
matched = false;
break;
} else {
_log.info(String.format("Volume [%s] is capable of being provisioned at %s bytes on storage system of type %s, continue...", volumeToCompare.getLabel(), currentVolumeCapacity, volumeToCompareSystemType));
}
}
// If all volume capacities match, we have a winner.
if (matched) {
break;
}
}
if (matched) {
// We have found capacity that is consistent across all Storage Systems
capacity = currentVolumeCapacity;
_log.info("Found a capacity size that is consistent across all source/target(s) storage systems: " + capacity);
for (Volume volume : allVolumesToUpdateCapacity) {
if (isChangeVpool && NullColumnValueGetter.isNotNullValue(volume.getPersonality()) && volume.getPersonality().equals(Volume.PersonalityTypes.SOURCE.toString())) {
// Don't update the existing source if this is a change vpool
continue;
}
updateVolumeCapacity(volume, capacity, isExpand);
}
} else {
// Circumvent CG creation, which would fail, by throwing an exception here.
throw APIException.internalServerErrors.noMatchingAllocationCapacityFound();
}
}
} else {
_log.info(String.format("All storage systems match and/or all volume sizes are consistent. No need for any capacity adjustments."));
capacity = requestedSize;
}
} else {
_log.error("There were no volumes found to compare capacities.");
}
return capacity;
}
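A hedged sketch of a possible call into computeProtectionCapacity from a create flow (hypothetical call site): preparedVolumes, requestedSize and descriptors are assumed to already be in scope, and only methods shown elsewhere on this page are used.
// Collect the URIs of the prepared volumes.
List<URI> volumeURIs = new ArrayList<URI>();
for (Volume vol : preparedVolumes) {
    volumeURIs.add(vol.getId());
}
// Neither an expand nor a change vpool request in this sketch, so both flags are false.
Long finalCapacity = computeProtectionCapacity(volumeURIs, requestedSize, false, false, descriptors);
_log.info(String.format("Using capacity %s for all RP source/target volumes.", finalCapacity));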
use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.
the class RPBlockServiceApiImpl method createVolumeDescriptors.
/**
* Prep work to call the orchestrator to create the volume descriptors
*
* @param recommendation the RP protection recommendation
* @param volumeURIs URIs of the volumes already prepared
* @param capabilities vpool capabilities
* @param oldVpool the original vpool, used when this is a change vpool request
* @param computeResource the compute resource (host or cluster) the source volume will be exported to, may be null
* @return list of volume descriptors
* @throws ControllerException
*/
private List<VolumeDescriptor> createVolumeDescriptors(RPProtectionRecommendation recommendation, List<URI> volumeURIs, VirtualPoolCapabilityValuesWrapper capabilities, URI oldVpool, URI computeResource) {
List<Volume> preparedVolumes = _dbClient.queryObject(Volume.class, volumeURIs);
List<VolumeDescriptor> descriptors = new ArrayList<VolumeDescriptor>();
// Package up the Volume descriptors
for (Volume volume : preparedVolumes) {
boolean vplex = RPHelper.isVPlexVolume(volume, _dbClient);
VolumeDescriptor.Type volumeType = VolumeDescriptor.Type.RP_SOURCE;
if (vplex) {
volumeType = VolumeDescriptor.Type.RP_VPLEX_VIRT_SOURCE;
}
// If capabilities is null, this is an expand operation and there is no need to set the max number of snaps
if (RPHelper.protectXtremioVolume(volume, _dbClient) && capabilities != null) {
capabilities.put(VirtualPoolCapabilityValuesWrapper.RP_MAX_SNAPS, 128);
}
VolumeDescriptor desc = null;
// Vpool Change flow, mark the production volume as already existing, so it doesn't get created
if (recommendation != null && (recommendation.getVpoolChangeVolume() != null) && Volume.PersonalityTypes.SOURCE.toString().equals(volume.getPersonality())) {
if (recommendation.isVpoolChangeProtectionAlreadyExists()) {
volumeType = VolumeDescriptor.Type.RP_EXISTING_PROTECTED_SOURCE;
} else {
volumeType = VolumeDescriptor.Type.RP_EXISTING_SOURCE;
}
desc = new VolumeDescriptor(volumeType, volume.getStorageController(), volume.getId(), volume.getPool(), null, capabilities, volume.getCapacity());
Map<String, Object> volumeParams = new HashMap<String, Object>();
volumeParams.put(VolumeDescriptor.PARAM_VPOOL_CHANGE_EXISTING_VOLUME_ID, recommendation.getVpoolChangeVolume());
volumeParams.put(VolumeDescriptor.PARAM_VPOOL_CHANGE_NEW_VPOOL_ID, recommendation.getVpoolChangeNewVpool());
volumeParams.put(VolumeDescriptor.PARAM_VPOOL_CHANGE_OLD_VPOOL_ID, oldVpool);
desc.setParameters(volumeParams);
descriptors.add(desc);
} else {
// Normal create-from-scratch flow
if (volume.getPersonality() == null) {
throw APIException.badRequests.missingPersonalityAttribute(String.valueOf(volume.getId()));
}
if (volume.getPersonality().equals(Volume.PersonalityTypes.TARGET.toString())) {
if (vplex) {
volumeType = VolumeDescriptor.Type.RP_VPLEX_VIRT_TARGET;
} else {
volumeType = VolumeDescriptor.Type.RP_TARGET;
}
} else if (volume.getPersonality().equals(Volume.PersonalityTypes.METADATA.toString())) {
if (vplex) {
volumeType = VolumeDescriptor.Type.RP_VPLEX_VIRT_JOURNAL;
} else {
volumeType = VolumeDescriptor.Type.RP_JOURNAL;
}
}
desc = new VolumeDescriptor(volumeType, volume.getStorageController(), volume.getId(), volume.getPool(), null, capabilities, volume.getCapacity());
if (volume.checkPersonality(Volume.PersonalityTypes.SOURCE.name()) && computeResource != null) {
_log.info(String.format("Volume %s - will be exported to Host/Cluster: %s", volume.getLabel(), computeResource.toString()));
desc.setComputeResource(computeResource);
}
descriptors.add(desc);
}
}
return descriptors;
}
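A hypothetical follow-up to createVolumeDescriptors, splitting the returned list by type with the same VolumeDescriptor.filterByType helper used in computeProtectionCapacity above; recommendation, volumeURIs, capabilities and oldVpool are assumed to be in scope, and null is passed for the compute resource.
List<VolumeDescriptor> descriptors = createVolumeDescriptors(recommendation, volumeURIs, capabilities, oldVpool, null);
// Pull out only the RP+VPLEX virtual source descriptors.
List<VolumeDescriptor> vplexSourceDescriptors = VolumeDescriptor.filterByType(descriptors, new VolumeDescriptor.Type[] { VolumeDescriptor.Type.RP_VPLEX_VIRT_SOURCE }, null);
_log.info(String.format("%s of %s descriptors are RP_VPLEX_VIRT_SOURCE descriptors.", vplexSourceDescriptors.size(), descriptors.size()));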
use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.
the class RPBlockServiceApiImpl method removeProtection.
/**
* Removes protection from the volume and leaves it in an unprotected state.
*
* @param volumes the existing volumes being protected.
* @param newVpool the requested virtual pool
* @param taskId the task identifier
* @throws InternalException
*/
private void removeProtection(List<Volume> volumes, VirtualPool newVpool, String taskId) throws InternalException {
List<URI> volumeURIs = new ArrayList<URI>();
for (Volume volume : volumes) {
_log.info(String.format("Request to remove protection from Volume [%s] (%s) and move it to Virtual Pool [%s] (%s)", volume.getLabel(), volume.getId(), newVpool.getLabel(), newVpool.getId()));
volumeURIs.add(volume.getId());
// List of RP bookmarks to cleanup (if any)
List<BlockSnapshot> rpBookmarks = new ArrayList<BlockSnapshot>();
// Get all the block snapshots and RP bookmarks for the source.
List<BlockSnapshot> sourceSnapshots = this.getSnapshotsForVolume(volume);
// Iterate through all snapshots found for the source
for (BlockSnapshot sourceSnapshot : sourceSnapshots) {
// Check to see if this is an RP bookmark
if (TechnologyType.RP.name().equals(sourceSnapshot.getTechnologyType())) {
// An exported RP bookmark must be un-exported before we can remove protection.
if (sourceSnapshot.isSnapshotExported(_dbClient)) {
String warningMessage = String.format("RP Bookmark/Snapshot [%s](%s) is exported to Host, " + "please un-export the Bookmark/Snapshot from all exports and place the order again", sourceSnapshot.getLabel(), sourceSnapshot.getId());
_log.warn(warningMessage);
throw APIException.badRequests.rpBlockApiImplRemoveProtectionException(warningMessage);
}
// Add bookmark to be cleaned up in ViPR. These
// would have been automatically removed in RP when
// removing protection anyway. So this is a pro-active
// cleanup step.
rpBookmarks.add(sourceSnapshot);
} else {
// Check to see if the source volume is a RP+VPLEX/MetroPoint volume.
if (RPHelper.isVPlexVolume(volume, _dbClient)) {
// There are block snapshots on the RP+VPLEX/MetroPoint Source, throw an exception to inform the
// user. We can not remove protection from a RP+VPLEX Source when there are active block snapshots.
// RP+VPLEX/MetroPoint block snapshots are actually replica group snapshots (in a CG). Since we need to
// remove the CG from the volume we can not have the replica group containing snaps in it when
// trying to remove protection.
String warningMessage = String.format("RecoverPoint protected VPLEX Volume [%s](%s) has an active snapshot, please delete the " + "following snapshot and place the order again: [%s](%s)", volume.getLabel(), volume.getId(), sourceSnapshot.getLabel(), sourceSnapshot.getId());
warningMessage = warningMessage.substring(0, warningMessage.length() - 2);
_log.warn(warningMessage);
throw APIException.badRequests.rpBlockApiImplRemoveProtectionException(warningMessage);
}
}
}
// Fail the request if: 1. Any target volume is still exported, or 2. There are local array snapshots on any of the targets
for (String targetId : volume.getRpTargets()) {
Volume targetVolume = _dbClient.queryObject(Volume.class, URI.create(targetId));
// Ensure targets are not exported
if (targetVolume.isVolumeExported(_dbClient, true, true)) {
String warningMessage = String.format("Target Volume [%s](%s) is exported to Host, please " + "un-export the volume from all exports and place the order again", targetVolume.getLabel(), targetVolume.getId());
_log.warn(warningMessage);
throw APIException.badRequests.rpBlockApiImplRemoveProtectionException(warningMessage);
}
List<BlockSnapshot> targetSnapshots = this.getSnapshotsForVolume(targetVolume);
for (BlockSnapshot targetSnapshot : targetSnapshots) {
// There are snapshots on the targets, throw an exception to inform the
// user. We do not want to auto-clean up the snapshots on the target.
// The user should first clean up those snapshots.
String warningMessage = String.format("Target Volume [%s] (%s) has an active snapshot, please delete the " + "following snapshot and place the order again: [%s](%s)", volume.getLabel(), volume.getId(), targetSnapshot.getLabel(), targetSnapshot.getId());
_log.warn(warningMessage);
throw APIException.badRequests.rpBlockApiImplRemoveProtectionException(warningMessage);
}
}
if (!rpBookmarks.isEmpty()) {
for (BlockSnapshot bookmark : rpBookmarks) {
_log.info(String.format("Deleting RP Snapshot/Bookmark [%s] (%s)", bookmark.getLabel(), bookmark.getId()));
// Generate task id
final String deleteSnapshotTaskId = UUID.randomUUID().toString();
// Delete the snapshot
this.deleteSnapshot(bookmark, Arrays.asList(bookmark), deleteSnapshotTaskId, VolumeDeleteTypeEnum.FULL.name());
}
}
}
// Get volume descriptors for all volumes to remove protection from.
List<VolumeDescriptor> volumeDescriptors = RPHelper.getDescriptorsForVolumesToBeDeleted(null, volumeURIs, RPHelper.REMOVE_PROTECTION, newVpool, _dbClient);
BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
controller.deleteVolumes(volumeDescriptors, taskId);
}
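A minimal, hypothetical call-site sketch for removeProtection, assuming volumeURIs holds the volumes selected in a change vpool request and newVpool and taskId are already resolved.
// Load the full Volume objects for the request.
List<Volume> volumesToUnprotect = _dbClient.queryObject(Volume.class, volumeURIs);
// Exported RP bookmarks, exported targets or active snapshots cause this call to throw
// an APIException before any orchestration is started.
removeProtection(volumesToUnprotect, newVpool, taskId);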
use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.
the class RPBlockServiceApiImpl method rpVPlexDataMigration.
/**
* Create the RP+VPLEX/MetroPoint Data Migration volume descriptors to be passed to the block orchestration
* change vpool workflow.
*
* @param volumes The RP+VPLEX/MetroPoint volumes to migrate
* @param newVpool The vpool to migrate to
* @param taskId The task
* @param validMigrations All valid migrations
* @param vpoolChangeParam VirtualPool change parameters used to determine if need to suspend on migration
* @return List of tasks
* @throws InternalException
*/
private TaskList rpVPlexDataMigration(List<Volume> volumes, VirtualPool newVpool, String taskId, List<RPVPlexMigration> validMigrations, VirtualPoolChangeParam vpoolChangeParam) throws InternalException {
// TaskList to return
TaskList taskList = new TaskList();
if (validMigrations == null || validMigrations.isEmpty()) {
_log.warn(String.format("No RP+VPLEX migrations found"));
return taskList;
}
_log.info(String.format("%s RP+VPLEX migrations found", validMigrations.size()));
List<RPVPlexMigration> sourceVpoolMigrations = new ArrayList<RPVPlexMigration>();
List<RPVPlexMigration> targetVpoolMigrations = new ArrayList<RPVPlexMigration>();
List<RPVPlexMigration> journalVpoolMigrations = new ArrayList<RPVPlexMigration>();
try {
// Group the migrations by personality
for (RPVPlexMigration migration : validMigrations) {
switch(migration.getType()) {
case SOURCE:
sourceVpoolMigrations.add(migration);
break;
case TARGET:
targetVpoolMigrations.add(migration);
break;
case METADATA:
journalVpoolMigrations.add(migration);
break;
default:
break;
}
}
// Convenience booleans to quickly check which migrations are required
boolean sourceMigrationsExist = (!sourceVpoolMigrations.isEmpty());
boolean targetMigrationsExist = (!targetVpoolMigrations.isEmpty());
boolean journalMigrationsExist = (!journalVpoolMigrations.isEmpty());
if (!sourceMigrationsExist && (targetMigrationsExist || journalMigrationsExist)) {
// When there are no Source migrations and the Source volumes are in RGs we need
// to make sure all those Source volumes are in the request.
//
// Otherwise we could have the case where some Source volumes have been moved to a
// new vpool and some have not.
validateSourceVolumesInRGForMigrationRequest(volumes);
}
_log.info(String.format("%s SOURCE migrations, %s TARGET migrations, %s METADATA migrations", sourceVpoolMigrations.size(), targetVpoolMigrations.size(), journalVpoolMigrations.size()));
// Buffer to log all the migrations
StringBuffer logMigrations = new StringBuffer();
logMigrations.append("\n\nRP+VPLEX Migrations:\n");
// Step 2
//
// Let's find out if there are any Source and Target volumes to migrate.
// Source and Target migrations will be treated in two different ways depending
// on if the VPLEX backend volumes are in an array Replication Group(RG) or not.
//
// 1. In RG
// Being in an RG means that the all volumes in the RG will need to be
// grouped and migrated together.
// NOTE:
// a) All volumes in the RG will need to be selected for the operation to proceed.
// b) There is restriction on the number of volumes in the RG that will be allowed for the migration.
// Default value is 25 volumes. This is an existing limitation in the VPLEX code.
// c) Journal volumes will never be in a backend RG.
// 2. Not in RG
// Treated as a normal single migration.
HashMap<VirtualPool, List<Volume>> allSourceVolumesToMigrate = new HashMap<VirtualPool, List<Volume>>();
HashMap<VirtualPool, List<Volume>> allTargetVolumesToMigrate = new HashMap<VirtualPool, List<Volume>>();
findSourceAndTargetMigrations(volumes, newVpool, sourceMigrationsExist, allSourceVolumesToMigrate, targetMigrationsExist, allTargetVolumesToMigrate, targetVpoolMigrations);
// Step 3
//
// Handle all Source and Target migrations. The ones grouped by RG will
// be migrated together. The others will be treated as single migrations.
// Map to store single migrations (those not grouped by RG)
Map<Volume, VirtualPool> singleMigrations = new HashMap<Volume, VirtualPool>();
// Source
//
// Source volumes could need to be grouped by RG or not (single migration).
//
// Grouped migrations will have a migration WF initiated via the
// call to migrateVolumesInReplicationGroup().
//
// Single migrations will be collected afterwards to be migrated explicitly in Step 4 and 6
// below.
rpVPlexGroupedMigrations(allSourceVolumesToMigrate, singleMigrations, Volume.PersonalityTypes.SOURCE.name(), logMigrations, taskList, vpoolChangeParam);
// Targets
//
// Target volumes could need to be grouped by RG or not (single migration).
//
// Grouped migrations will have a migration WF initiated via the
// call to migrateVolumesInReplicationGroup().
//
// Single migrations will be collected afterwards to be migrated explicitly in Step 4 and 6
// below.
rpVPlexGroupedMigrations(allTargetVolumesToMigrate, singleMigrations, Volume.PersonalityTypes.TARGET.name(), logMigrations, taskList, vpoolChangeParam);
// Journals
//
// Journals will never be in RGs so they will always be treated as single migrations.
// Journal volumes must be checked against the CG. So we need to gather all affected
// CGs in the request.
// A new task will be generated to track each Journal migration.
Set<URI> cgURIs = BlockConsistencyGroupUtils.getAllCGsFromVolumes(volumes);
rpVPlexJournalMigrations(journalMigrationsExist, journalVpoolMigrations, singleMigrations, cgURIs, logMigrations);
logMigrations.append("\n");
_log.info(logMigrations.toString());
// Step 4
//
// Create the migration volume descriptors for all single migrations that are not in an RG.
List<VolumeDescriptor> migrateVolumeDescriptors = new ArrayList<VolumeDescriptor>();
for (Map.Entry<Volume, VirtualPool> entry : singleMigrations.entrySet()) {
Volume migrateVolume = entry.getKey();
VirtualPool migrateToVpool = entry.getValue();
boolean allowHighAvailabilityMigrations = true;
if (!migrateVolume.getAssociatedVolumes().isEmpty()) {
// This is mainly an issue for RP+VPLEX journals.
if (migrateVolume.getAssociatedVolumes().size() <= 1) {
allowHighAvailabilityMigrations = false;
}
} else {
// Ex: Active Source journals that use the default Source vpool for provisioning.
if (Volume.PersonalityTypes.METADATA.name().equals(migrateVolume.getPersonality())) {
allowHighAvailabilityMigrations = false;
}
}
StorageSystem vplexStorageSystem = _dbClient.queryObject(StorageSystem.class, migrateVolume.getStorageController());
migrateVolumeDescriptors.addAll(vplexBlockServiceApiImpl.createChangeVirtualPoolDescriptors(vplexStorageSystem, migrateVolume, migrateToVpool, taskId, null, null, null, allowHighAvailabilityMigrations));
}
// If there are no Source migrations but Target and/or Journal migrations exist, add DUMMY_MIGRATE descriptors for the Source volumes to ensure the task is completed correctly and the vpools updated by the completer.
if (!sourceMigrationsExist && (targetMigrationsExist || journalMigrationsExist)) {
_log.info("No RP+VPLEX Source migrations detected, creating DUMMY_MIGRATE volume descriptors for the Source volumes.");
for (Volume volume : volumes) {
if (volume.checkPersonality(Volume.PersonalityTypes.SOURCE)) {
// Add the VPLEX Virtual Volume Descriptor for change vpool
VolumeDescriptor dummyMigrate = new VolumeDescriptor(VolumeDescriptor.Type.DUMMY_MIGRATE, volume.getStorageController(), volume.getId(), volume.getPool(), null);
Map<String, Object> volumeParams = new HashMap<String, Object>();
volumeParams.put(VolumeDescriptor.PARAM_VPOOL_CHANGE_EXISTING_VOLUME_ID, volume.getId());
volumeParams.put(VolumeDescriptor.PARAM_VPOOL_CHANGE_NEW_VPOOL_ID, newVpool.getId());
volumeParams.put(VolumeDescriptor.PARAM_VPOOL_CHANGE_OLD_VPOOL_ID, volume.getVirtualPool());
dummyMigrate.setParameters(volumeParams);
migrateVolumeDescriptors.add(dummyMigrate);
}
}
}
// If any descriptors were created, generate tasks and invoke the block orchestration for the single migrations.
if (!migrateVolumeDescriptors.isEmpty()) {
// Generate the correct task information for single migrations
List<Volume> migrationVolumes = new ArrayList<Volume>();
migrationVolumes.addAll(singleMigrations.keySet());
taskList.getTaskList().addAll(createTasksForVolumes(newVpool, migrationVolumes, taskId).getTaskList());
// Invoke the block orchestrator for the change vpool operation
BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
controller.changeVirtualPool(migrateVolumeDescriptors, taskId);
} else {
_log.info(String.format("No extra migrations needed."));
}
} catch (Exception e) {
String errorMsg = String.format("Volume VirtualPool change error: %s", e.getMessage());
_log.error(errorMsg, e);
for (TaskResourceRep volumeTask : taskList.getTaskList()) {
volumeTask.setState(Operation.Status.error.name());
volumeTask.setMessage(errorMsg);
_dbClient.updateTaskOpStatus(Volume.class, volumeTask.getResource().getId(), taskId, new Operation(Operation.Status.error.name(), errorMsg));
}
throw e;
}
return taskList;
}
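A hedged sketch of how the returned TaskList might be folded into the response of an overall change vpool request; taskList, volumes, newVpool, taskId, validMigrations and vpoolChangeParam are assumed to be in scope.
// Kick off any RP+VPLEX/MetroPoint data migrations and merge the resulting tasks into the response.
TaskList migrationTasks = rpVPlexDataMigration(volumes, newVpool, taskId, validMigrations, vpoolChangeParam);
taskList.getTaskList().addAll(migrationTasks.getTaskList());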