Use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.
The class VPlexBlockServiceApiImpl, method createBackendVolumeMigrationDescriptors.
/**
* Does the work necessary to prepare the passed backend volume for the
* passed virtual volume to be migrated to a new volume with a new VirtualPool.
*
* @param vplexSystem A reference to the VPLEX storage system.
* @param virtualVolume A reference to the virtual volume.
* @param sourceVolume A reference to the backend volume to be migrated.
* @param varray A reference to the varray for the new backend volume.
* @param vpool A reference to the VirtualPool for the new volume.
* @param capacity The capacity for the migration target.
* @param taskId The task identifier.
* @param recommendations The placement recommendations, or null/empty to have placement scheduled here.
* @param isHA true when the descriptors are being created for the HA side of the virtual volume.
* @param capabilities The virtual pool capability values, or null to build a default wrapper from the passed capacity.
*
* @return A list of descriptors for the migration target volume and the migration itself.
*/
private List<VolumeDescriptor> createBackendVolumeMigrationDescriptors(StorageSystem vplexSystem, Volume virtualVolume, Volume sourceVolume, VirtualArray varray, VirtualPool vpool, Long capacity, String taskId, List<Recommendation> recommendations, boolean isHA, VirtualPoolCapabilityValuesWrapper capabilities) {
// If we know the backend source volume, the new backend volume
// will have the same label and project. Otherwise, the volume
// must be ingested and we know nothing about the backend volume.
// Therefore, we create the label based on the name of the VPLEX
// volume and determine the project in a manner similar to a
// volume creation.
URI sourceVolumeURI = null;
Project targetProject = null;
String targetLabel = null;
if (sourceVolume != null) {
// Since we know the source volume, this is not an ingested
// VPLEX volume that is being migrated. Ideally we would just
// give the new backend volume the same name as the current
// i.e., source. However, this is a problem if the migration
// is on the same array. We can't have two volumes with the
// same name. Eventually the source will go away, but not until
// after the migration is completed. The backend volume names
// are basically irrelevant, but we still want them tied to
// the VPLEX volume name.
//
// When initially created, the names are <vvolname>-0 or
// <vvolname>-1, depending upon if it is the source side
// backend volume or HA side backend volume. The volume may
// also have an additional suffix of "-<1...N>" if the
// VPLEX volume was created as part of a multi-volume creation
// request, where N was the number of volumes requested. When
// a volume is first migrated, we will append an "m" to the current
// source volume name to ensure name uniqueness. If the volume
// happens to be migrated again, we'll remove the extra character.
// We'll go back and forth in this manner for each migration of that
// backend volume.
sourceVolumeURI = sourceVolume.getId();
targetProject = _dbClient.queryObject(Project.class, sourceVolume.getProject().getURI());
targetLabel = sourceVolume.getLabel();
if (!targetLabel.endsWith(MIGRATION_LABEL_SUFFIX)) {
targetLabel += MIGRATION_LABEL_SUFFIX;
} else {
targetLabel = targetLabel.substring(0, targetLabel.length() - 1);
}
} else {
targetProject = getVplexProject(vplexSystem, _dbClient, _tenantsService);
targetLabel = virtualVolume.getLabel();
if (virtualVolume.getVirtualArray().equals(varray.getId())) {
targetLabel += SRC_BACKEND_VOL_LABEL_SUFFIX;
} else {
targetLabel += HA_BACKEND_VOL_LABEL_SUFFIX;
}
}
// Get the recommendation for this volume placement.
Set<URI> requestedVPlexSystems = new HashSet<URI>();
requestedVPlexSystems.add(vplexSystem.getId());
URI cgURI = null;
// Check to see if the VirtualPoolCapabilityValuesWrapper has been passed in; if not, create a new one.
if (capabilities != null) {
// The consistency group or null when not specified.
final BlockConsistencyGroup consistencyGroup = capabilities.getBlockConsistencyGroup() == null ? null : _dbClient.queryObject(BlockConsistencyGroup.class, capabilities.getBlockConsistencyGroup());
// Pass the CG on to the descriptors only when it has not yet been created or already includes backend (LOCAL) CGs; in the remaining case, we don't want a volume creation to result in backend CGs.
if ((consistencyGroup != null) && ((!consistencyGroup.created()) || (consistencyGroup.getTypes().contains(Types.LOCAL.toString())))) {
cgURI = consistencyGroup.getId();
}
} else {
capabilities = new VirtualPoolCapabilityValuesWrapper();
capabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, capacity);
capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, Integer.valueOf(1));
}
boolean premadeRecs = false;
if (recommendations == null || recommendations.isEmpty()) {
recommendations = getBlockScheduler().scheduleStorage(varray, requestedVPlexSystems, null, vpool, false, null, null, capabilities, targetProject, VpoolUse.ROOT, new HashMap<VpoolUse, List<Recommendation>>());
if (recommendations.isEmpty()) {
throw APIException.badRequests.noStorageFoundForVolumeMigration(vpool.getLabel(), varray.getLabel(), sourceVolumeURI);
}
s_logger.info("Got recommendation");
} else {
premadeRecs = true;
}
// If we have premade recommendations passed in and this is trying to create descriptors for HA
// then the HA rec will be at index 1 instead of index 0. Default case is index 0.
int recIndex = (premadeRecs && isHA) ? 1 : 0;
// Create a volume for the new backend volume to which
// data will be migrated.
URI targetStorageSystem = recommendations.get(recIndex).getSourceStorageSystem();
URI targetStoragePool = recommendations.get(recIndex).getSourceStoragePool();
Volume targetVolume = prepareVolumeForRequest(capacity, targetProject, varray, vpool, targetStorageSystem, targetStoragePool, targetLabel, ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME, taskId, _dbClient);
// If the cgURI is null, try and get it from the source volume.
if (cgURI == null) {
if ((sourceVolume != null) && (!NullColumnValueGetter.isNullURI(sourceVolume.getConsistencyGroup()))) {
cgURI = sourceVolume.getConsistencyGroup();
targetVolume.setConsistencyGroup(cgURI);
}
}
if ((sourceVolume != null) && NullColumnValueGetter.isNotNullValue(sourceVolume.getReplicationGroupInstance())) {
targetVolume.setReplicationGroupInstance(sourceVolume.getReplicationGroupInstance());
}
targetVolume.addInternalFlags(Flag.INTERNAL_OBJECT);
_dbClient.updateObject(targetVolume);
s_logger.info("Prepared volume {}", targetVolume.getId());
// Create a descriptor for the new backend volume and add it to the
// descriptor list to be returned.
URI targetVolumeURI = targetVolume.getId();
List<VolumeDescriptor> descriptors = new ArrayList<VolumeDescriptor>();
descriptors.add(new VolumeDescriptor(VolumeDescriptor.Type.BLOCK_DATA, targetStorageSystem, targetVolumeURI, targetStoragePool, cgURI, capabilities, capacity));
// Create a migration to represent the movement of data
// from the backend volume to the new backend volume for the
// passed virtual volume, and add a migration descriptor for it.
Migration migration = prepareMigration(virtualVolume.getId(), sourceVolumeURI, targetVolumeURI, taskId);
descriptors.add(new VolumeDescriptor(VolumeDescriptor.Type.VPLEX_MIGRATE_VOLUME, targetStorageSystem, targetVolumeURI, targetStoragePool, cgURI, migration.getId(), capabilities));
printMigrationInfo(migration, sourceVolume, targetVolume);
return descriptors;
}
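The back-and-forth label toggling described in the comments above can be illustrated in isolation. The following sketch is not CoprHD source; it assumes MIGRATION_LABEL_SUFFIX is the single character "m" mentioned in the comments, purely to show how repeated migrations flip the backend volume label.

public class MigrationLabelToggleDemo {
    // Assumed value; the real constant lives in VPlexBlockServiceApiImpl.
    private static final String MIGRATION_LABEL_SUFFIX = "m";

    // Mirrors the toggle in createBackendVolumeMigrationDescriptors:
    // append the suffix if absent, strip it if already present.
    static String nextTargetLabel(String currentLabel) {
        if (!currentLabel.endsWith(MIGRATION_LABEL_SUFFIX)) {
            return currentLabel + MIGRATION_LABEL_SUFFIX;
        }
        return currentLabel.substring(0, currentLabel.length() - 1);
    }

    public static void main(String[] args) {
        String label = "myvvol-0";
        label = nextTargetLabel(label); // "myvvol-0m" after the first migration
        label = nextTargetLabel(label); // back to "myvvol-0" after the second
        System.out.println(label);
    }
}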
Use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.
The class VPlexBlockServiceApiImpl, method createVolumes.
/**
* {@inheritDoc}
*
* @throws InternalException
*/
@Override
public TaskList createVolumes(VolumeCreate param, Project project, VirtualArray vArray, VirtualPool vPool, Map<VpoolUse, List<Recommendation>> recommendationMap, TaskList taskList, String task, VirtualPoolCapabilityValuesWrapper vPoolCapabilities) throws InternalException {
List<Recommendation> volRecommendations = recommendationMap.get(VpoolUse.ROOT);
List<Recommendation> srdfCopyRecommendations = recommendationMap.get(VpoolUse.SRDF_COPY);
if (taskList == null) {
taskList = new TaskList();
}
List<URI> allVolumes = new ArrayList<URI>();
List<VolumeDescriptor> descriptors = createVPlexVolumeDescriptors(param, project, vArray, vPool, volRecommendations, task, vPoolCapabilities, vPoolCapabilities.getBlockConsistencyGroup(), taskList, allVolumes, true);
for (VolumeDescriptor desc : descriptors) {
s_logger.info("Vplex Root Descriptors: " + desc.toString());
}
if (srdfCopyRecommendations != null) {
// This may be a Vplex volume or not
for (Recommendation srdfCopyRecommendation : srdfCopyRecommendations) {
vArray = _dbClient.queryObject(VirtualArray.class, srdfCopyRecommendation.getVirtualArray());
vPool = srdfCopyRecommendation.getVirtualPool();
List<VolumeDescriptor> srdfCopyDescriptors = new ArrayList<VolumeDescriptor>();
List<Recommendation> copyRecommendations = new ArrayList<Recommendation>();
copyRecommendations.add(srdfCopyRecommendation);
if (srdfCopyRecommendation instanceof VPlexRecommendation) {
String name = param.getName();
// Do not pass in the consistency group for vplex volumes fronting targets
// as we will eventually put them in the target CG.
srdfCopyDescriptors = createVPlexVolumeDescriptors(param, project, vArray, vPool, copyRecommendations, task, vPoolCapabilities, null, taskList, allVolumes, true);
param.setName(name);
} else {
srdfCopyDescriptors = super.createVolumesAndDescriptors(srdfCopyDescriptors, param.getName() + "_srdf_copy", vPoolCapabilities.getSize(), project, vArray, vPool, copyRecommendations, taskList, task, vPoolCapabilities);
}
for (VolumeDescriptor desc : srdfCopyDescriptors) {
s_logger.info("SRDF Copy: " + desc.toString());
}
descriptors.addAll(srdfCopyDescriptors);
}
}
// Log volume descriptor information
logVolumeDescriptorPrecreateInfo(descriptors, task);
// Now we get the Orchestration controller and use it to create the volumes of all types.
try {
BlockOrchestrationController controller = getController(BlockOrchestrationController.class, BlockOrchestrationController.BLOCK_ORCHESTRATION_DEVICE);
controller.createVolumes(descriptors, task);
} catch (InternalException e) {
if (s_logger.isErrorEnabled()) {
s_logger.error("Controller error", e);
}
String errMsg = String.format("Controller error: %s", e.getMessage());
Operation statusUpdate = new Operation(Operation.Status.error.name(), errMsg);
for (URI volumeURI : allVolumes) {
_dbClient.updateTaskOpStatus(Volume.class, volumeURI, task, statusUpdate);
}
for (TaskResourceRep volumeTask : taskList.getTaskList()) {
volumeTask.setState(Operation.Status.error.name());
volumeTask.setMessage(errMsg);
}
throw e;
}
return taskList;
}
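For context, createVolumes expects its recommendations keyed by how they will be used. The following is a hypothetical caller sketch, not CoprHD code; vplexBlockServiceApi, rootRecommendations, srdfCopyRecommendations, and taskId are placeholder names, and param, project, vArray, vPool, and vPoolCapabilities are assumed to be in scope as in the method signature above.

// Hypothetical caller: assemble the per-use recommendation map expected by createVolumes.
Map<VpoolUse, List<Recommendation>> recommendationMap = new HashMap<VpoolUse, List<Recommendation>>();
recommendationMap.put(VpoolUse.ROOT, rootRecommendations);          // placement for the VPLEX virtual volume(s)
recommendationMap.put(VpoolUse.SRDF_COPY, srdfCopyRecommendations); // optional; omit for non-SRDF vpools

TaskList tasks = vplexBlockServiceApi.createVolumes(param, project, vArray, vPool,
        recommendationMap, new TaskList(), taskId, vPoolCapabilities);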
Use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.
The class VPlexBlockServiceApiImpl, method deactivateMirror.
/**
* {@inheritDoc}
*/
@Override
public TaskList deactivateMirror(StorageSystem vplexStorageSystem, URI mirrorURI, String taskId, String deleteType) {
TaskList taskList = new TaskList();
try {
VplexMirror mirror = _dbClient.queryObject(VplexMirror.class, mirrorURI);
Volume sourceVolume = _dbClient.queryObject(Volume.class, mirror.getSource().getURI());
Operation op = _dbClient.createTaskOpStatus(Volume.class, sourceVolume.getId(), taskId, ResourceOperationTypeEnum.DEACTIVATE_VOLUME_MIRROR, mirror.getId().toString());
taskList.getTaskList().add(toTask(sourceVolume, Arrays.asList(mirror), taskId, op));
if (VolumeDeleteTypeEnum.VIPR_ONLY.name().equals(deleteType)) {
s_logger.info("Perform ViPR-only delete for VPLEX mirrors %s", mirrorURI);
// Perform any database cleanup that is required.
cleanupForViPROnlyMirrorDelete(Arrays.asList(mirrorURI));
// Mark them inactive.
_dbClient.markForDeletion(_dbClient.queryObject(VplexMirror.class, mirrorURI));
// We must get the volume from the DB again, to properly update the status.
sourceVolume = _dbClient.queryObject(Volume.class, mirror.getSource().getURI());
op = sourceVolume.getOpStatus().get(taskId);
op.ready("VPLEX continuous copy successfully deleted from ViPR");
sourceVolume.getOpStatus().updateTaskStatus(taskId, op);
_dbClient.updateObject(sourceVolume);
} else {
List<VolumeDescriptor> descriptors = new ArrayList<VolumeDescriptor>();
// Add a descriptor for each of the associated volumes. There will be only one associated volume.
if (mirror.getAssociatedVolumes() != null) {
for (String assocVolId : mirror.getAssociatedVolumes()) {
Volume assocVolume = _dbClient.queryObject(Volume.class, URI.create(assocVolId));
if (assocVolume != null && !assocVolume.getInactive() && assocVolume.getNativeId() != null) {
// Before adding a descriptor for a backend volume that needs to be
// deleted, we also check the volume nativeId, because it's possible
// that the backend volume was never created due to an SMI-S communication
// failure and rollback didn't clean up the VplexMirror and its associated
// volumes in the database. In such a case the nativeId will be null and we
// just want to skip sending this volume to SMI-S; otherwise it fails with a
// null reference when the user attempts to clean up this failed mirror.
VolumeDescriptor assocDesc = new VolumeDescriptor(VolumeDescriptor.Type.BLOCK_DATA, assocVolume.getStorageController(), assocVolume.getId(), null, null);
descriptors.add(assocDesc);
}
}
}
VPlexController controller = getController();
controller.deactivateMirror(vplexStorageSystem.getId(), mirror.getId(), descriptors, taskId);
}
} catch (ControllerException e) {
String errorMsg = format("Failed to deactivate continuous copy %s: %s", mirrorURI.toString(), e.getMessage());
s_logger.error(errorMsg, e);
for (TaskResourceRep taskResourceRep : taskList.getTaskList()) {
taskResourceRep.setState(Operation.Status.error.name());
taskResourceRep.setMessage(errorMsg);
_dbClient.error(Volume.class, taskResourceRep.getResource().getId(), taskId, e);
}
} catch (Exception e) {
String errorMsg = format("Failed to deactivate continuous copy %s: %s", mirrorURI.toString(), e.getMessage());
s_logger.error(errorMsg, e);
ServiceCoded sc = APIException.internalServerErrors.genericApisvcError(errorMsg, e);
for (TaskResourceRep taskResourceRep : taskList.getTaskList()) {
taskResourceRep.setState(Operation.Status.error.name());
taskResourceRep.setMessage(sc.getMessage());
_dbClient.error(Volume.class, taskResourceRep.getResource().getId(), taskId, sc);
}
}
return taskList;
}
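A hedged caller sketch for the two delete paths handled above; the service reference and URI variables are placeholders, and the FULL enum value is an assumption (any value other than VIPR_ONLY takes the controller path).

// ViPR-only (inventory) delete: database cleanup only, no array operation.
TaskList inventoryDelete = vplexBlockServiceApi.deactivateMirror(vplexSystem, mirrorURI, taskId,
        VolumeDeleteTypeEnum.VIPR_ONLY.name());

// Full delete: descriptors for the mirror's backend volume are built and handed to the VPlexController.
TaskList fullDelete = vplexBlockServiceApi.deactivateMirror(vplexSystem, mirrorURI, taskId,
        VolumeDeleteTypeEnum.FULL.name()); // FULL assumed; see note above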
Use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.
The class VPlexBlockServiceApiImpl, method addDescriptorsForVplexMirrors.
public void addDescriptorsForVplexMirrors(List<VolumeDescriptor> descriptors, Volume vplexVolume) {
if (vplexVolume.getMirrors() != null && !vplexVolume.getMirrors().isEmpty()) {
for (String mirrorId : vplexVolume.getMirrors()) {
VplexMirror mirror = _dbClient.queryObject(VplexMirror.class, URI.create(mirrorId));
if (mirror != null && !mirror.getInactive()) {
if (null != mirror.getAssociatedVolumes()) {
for (String assocVolumeId : mirror.getAssociatedVolumes()) {
Volume volume = _dbClient.queryObject(Volume.class, URI.create(assocVolumeId));
if (volume != null && !volume.getInactive()) {
VolumeDescriptor volDesc = new VolumeDescriptor(VolumeDescriptor.Type.BLOCK_DATA, volume.getStorageController(), URI.create(assocVolumeId), null, null);
descriptors.add(volDesc);
}
}
}
}
}
}
}
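A brief usage sketch with hypothetical caller variables: addDescriptorsForVplexMirrors appends BLOCK_DATA descriptors for any mirror backend volumes to a list the caller already owns, so it composes naturally with other descriptor-building steps.

// Hypothetical caller: collect descriptors for a VPLEX volume and any mirror backend volumes.
List<VolumeDescriptor> deleteDescriptors = new ArrayList<VolumeDescriptor>();
Volume vplexVolume = _dbClient.queryObject(Volume.class, vplexVolumeURI); // vplexVolumeURI is assumed
// ... descriptors for the virtual volume and its associated backend volumes would be added here ...
addDescriptorsForVplexMirrors(deleteDescriptors, vplexVolume); // appends mirror backend volume descriptors in place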
Use of com.emc.storageos.blockorchestrationcontroller.VolumeDescriptor in project coprhd-controller by CoprHD.
The class VPlexBlockServiceApiImpl, method migrateVolumesInReplicationGroup.
/**
* Group all volumes in RGs and create a WF to migrate those volumes together.
*
* @param volumes All volumes being considered for migration
* @param vpool The vpool to migrate to
* @param volumesNotInRG A container to store all volumes NOT in an RG
* @param volumesInRG A container to store all the volumes in an RG
* @param controllerOperationValues Values from the controller call used to determine whether
* we need to suspend on commit or on deletion of the source volumes.
* @return taskList Tasks generated for RG migrations
*/
protected TaskList migrateVolumesInReplicationGroup(List<Volume> volumes, VirtualPool vpool, List<Volume> volumesNotInRG, List<Volume> volumesInRG, ControllerOperationValuesWrapper controllerOperationValues) {
TaskList taskList = new TaskList();
// Group all volumes in the request by RG. If there are no volumes in the request
// that are in an RG then the table will be empty.
Table<URI, String, List<Volume>> groupVolumes = VPlexUtil.groupVPlexVolumesByRG(volumes, volumesNotInRG, volumesInRG, _dbClient);
for (Table.Cell<URI, String, List<Volume>> cell : groupVolumes.cellSet()) {
// Get all the volumes in the request that have been grouped by RG
List<Volume> volumesInRGRequest = cell.getValue();
// Grab the first volume
Volume firstVolume = volumesInRGRequest.get(0);
// Get all the volumes from the RG
List<Volume> rgVolumes = VPlexUtil.getVolumesInSameReplicationGroup(cell.getColumnKey(), cell.getRowKey(), firstVolume.getPersonality(), _dbClient);
// We need to migrate all the volumes from the RG together.
if (volumesInRGRequest.size() != rgVolumes.size()) {
throw APIException.badRequests.cantChangeVpoolNotAllCGVolumes();
}
BlockConsistencyGroup cg = _dbClient.queryObject(BlockConsistencyGroup.class, firstVolume.getConsistencyGroup());
URI systemURI = firstVolume.getStorageController();
// Make sure the CG does not contain more volumes than the maximum allowed for a vpool change.
if (volumesInRGRequest.size() > _maxCgVolumesForMigration) {
throw APIException.badRequests.cgContainsTooManyVolumesForVPoolChange(cg.getLabel(), volumes.size(), _maxCgVolumesForMigration);
}
// For a multi-volume request, verify that all the volumes can be placed in a CG on the target storage system.
if (volumesInRGRequest.size() > 1) {
s_logger.info("Multiple volume request, verifying target storage systems");
verifyTargetSystemsForCGDataMigration(volumesInRGRequest, vpool, cg.getVirtualArray());
}
// Create a unique task id.
String taskId = UUID.randomUUID().toString();
// Get all volume descriptors for all volumes to be migrated.
StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, systemURI);
List<VolumeDescriptor> descriptors = new ArrayList<VolumeDescriptor>();
for (Volume volume : volumesInRGRequest) {
descriptors.addAll(createChangeVirtualPoolDescriptors(storageSystem, volume, vpool, taskId, null, null, controllerOperationValues, true));
}
// Create a task object associated with the CG
taskList.getTaskList().add(createTaskForRG(vpool, rgVolumes, taskId));
// Orchestrate the vpool changes of all volumes as a single request.
orchestrateVPoolChanges(volumesInRGRequest, descriptors, taskId);
}
return taskList;
}
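The grouping above relies on a Guava Table keyed two ways. The following sketch is illustrative only and is not the VPlexUtil implementation; the choice of row key (storage controller URI) and column key (replication group name) is an assumption made to show the shape of the table.

// Illustrative only: group volumes by a URI row key and a replication group name column key,
// mirroring the shape of the table returned by VPlexUtil.groupVPlexVolumesByRG.
Table<URI, String, List<Volume>> groups = HashBasedTable.create();
for (Volume volume : candidateVolumes) { // candidateVolumes is a placeholder
    String rgName = volume.getReplicationGroupInstance();
    if (rgName == null) {
        continue; // volumes not in a replication group are handled separately
    }
    URI rowKey = volume.getStorageController(); // assumed row key for this sketch
    List<Volume> bucket = groups.get(rowKey, rgName);
    if (bucket == null) {
        bucket = new ArrayList<Volume>();
        groups.put(rowKey, rgName, bucket);
    }
    bucket.add(volume);
}
// Each cell (one replication group on one system) can then be migrated as a unit,
// as migrateVolumesInReplicationGroup does with the table built by VPlexUtil.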