Usage of com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper in project coprhd-controller (by CoprHD):
class VPlexBlockServiceApiImpl, method upgradeToDistributed.
/**
 * Upgrade a local VPLEX volume to a distributed VPLEX volume.
 *
 * @param vplexURI -- VPLEX System URI
 * @param vplexVolume -- VPlex volume (existing).
 * @param vpool -- Requested vpool.
 * @param transferSpeed -- Requested rebuild transfer speed passed through to the controller (may be null).
 * @param taskId -- The task identifier used to track the operation.
 * @throws InternalException
 */
private void upgradeToDistributed(URI vplexURI, Volume vplexVolume, VirtualPool vpool, String transferSpeed, String taskId) throws InternalException {
    try {
        VirtualArray neighborhood = _dbClient.queryObject(VirtualArray.class, vplexVolume.getVirtualArray());
        Set<URI> vplexes = new HashSet<URI>();
        vplexes.add(vplexURI);
        // A local VPLEX volume must have exactly its backend volume associated; without it
        // there is nothing to mirror to the distributed leg.
        if (null == vplexVolume.getAssociatedVolumes() || vplexVolume.getAssociatedVolumes().isEmpty()) {
            s_logger.error("VPLEX volume {} has no backend volumes.", vplexVolume.forDisplay());
            throw InternalServerErrorException.internalServerErrors.noAssociatedVolumesForVPLEXVolume(vplexVolume.forDisplay());
        }
        Iterator<String> assocIter = vplexVolume.getAssociatedVolumes().iterator();
        URI existingVolumeURI = new URI(assocIter.next());
        Volume existingVolume = _dbClient.queryObject(Volume.class, existingVolumeURI);
        if (existingVolume == null || existingVolume.getInactive()) {
            throw new ServiceCodeException(ServiceCode.UNFORSEEN_ERROR, "Existing volume inactive", new Object[] {});
        }
        // Build the capabilities used for placement: match the existing backend volume's
        // size and thin-provisioning setting, and request a single resource.
        VirtualPoolCapabilityValuesWrapper cosCapabilities = new VirtualPoolCapabilityValuesWrapper();
        cosCapabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, getVolumeCapacity(existingVolume));
        cosCapabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, Integer.valueOf(1));
        cosCapabilities.put(VirtualPoolCapabilityValuesWrapper.THIN_PROVISIONING, existingVolume.getThinlyProvisioned());
        // Get a recommendation.
        // Then create the volume.
        List<VolumeDescriptor> descriptors = new ArrayList<VolumeDescriptor>();
        Volume createVolume = null;
        // Determine if the user requested a specific HA VirtualArray and an associated HA VirtualPool.
        VirtualArray requestedHaVarray = null;
        VirtualPool requestedHaVirtualPool = vpool;
        if (vpool.getHaVarrayVpoolMap() != null && !vpool.getHaVarrayVpoolMap().isEmpty()) {
            for (String haNH : vpool.getHaVarrayVpoolMap().keySet()) {
                // Skip null placeholder entries in the HA varray->vpool map.
                if (haNH.equals(NullColumnValueGetter.getNullURI().toString())) {
                    continue;
                }
                requestedHaVarray = _dbClient.queryObject(VirtualArray.class, new URI(haNH));
                String haVirtualPool = vpool.getHaVarrayVpoolMap().get(haNH);
                if (haVirtualPool.equals(NullColumnValueGetter.getNullURI().toString())) {
                    continue;
                }
                requestedHaVirtualPool = _dbClient.queryObject(VirtualPool.class, new URI(haVirtualPool));
                break;
            }
        }
        // Get the recommendations and pick one.
        List<Recommendation> recommendations = getBlockScheduler().scheduleStorageForImport(neighborhood, vplexes, requestedHaVarray, requestedHaVirtualPool, cosCapabilities);
        if (recommendations.isEmpty()) {
            // BUG FIX: requestedHaVarray can legitimately be null here (no explicit HA varray
            // in the vpool); guard against an NPE while building the error message.
            String haVarrayLabel = (requestedHaVarray != null) ? requestedHaVarray.getLabel() : null;
            throw APIException.badRequests.noStorageFoundForVolumeMigration(requestedHaVirtualPool.getLabel(), haVarrayLabel, existingVolume.getId());
        }
        Recommendation recommendation = recommendations.get(0);
        VPlexRecommendation vplexRecommendation = (VPlexRecommendation) recommendation;
        if (!vplexURI.equals(vplexRecommendation.getVPlexStorageSystem())) {
            // BUG FIX: the original constructed this exception but never threw it,
            // so a placement mismatch was silently ignored.
            throw APIException.badRequests.vplexPlacementError(vplexVolume.getId());
        }
        StorageSystem vplexSystem = _dbClient.queryObject(StorageSystem.class, vplexURI);
        Project vplexProject = getVplexProject(vplexSystem, _dbClient, _tenantsService);
        // Prepare the created volume. The "-1" suffix marks it as the HA-side backend volume.
        VirtualArray haVirtualArray = _dbClient.queryObject(VirtualArray.class, vplexRecommendation.getVirtualArray());
        createVolume = prepareVolumeForRequest(getVolumeCapacity(existingVolume), vplexProject, haVirtualArray, requestedHaVirtualPool, vplexRecommendation.getSourceStorageSystem(), vplexRecommendation.getSourceStoragePool(), vplexVolume.getLabel() + "-1", ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME, taskId, _dbClient);
        createVolume.addInternalFlags(Flag.INTERNAL_OBJECT);
        _dbClient.updateObject(createVolume);
        VolumeDescriptor desc = new VolumeDescriptor(VolumeDescriptor.Type.BLOCK_DATA, createVolume.getStorageController(), createVolume.getId(), createVolume.getPool(), cosCapabilities);
        descriptors.add(desc);
        // Add a descriptor for the VPlex Virtual Volume.
        desc = new VolumeDescriptor(VolumeDescriptor.Type.VPLEX_VIRT_VOLUME, vplexVolume.getStorageController(), vplexVolume.getId(), vplexVolume.getPool(), cosCapabilities);
        descriptors.add(desc);
        // Now send the command to the controller.
        try {
            s_logger.info("Calling VPlex controller.");
            VPlexController controller = getController();
            controller.importVolume(vplexURI, descriptors, null, null, vpool.getId(), null, transferSpeed, Boolean.TRUE, taskId);
        } catch (InternalException ex) {
            // Mark the task as failed on the virtual volume before propagating.
            s_logger.error("ControllerException on upgradeToDistributed", ex);
            String errMsg = String.format("ControllerException: %s", ex.getMessage());
            Operation statusUpdate = new Operation(Operation.Status.error.name(), errMsg);
            _dbClient.updateTaskOpStatus(Volume.class, vplexVolume.getId(), taskId, statusUpdate);
            throw ex;
        }
    } catch (URISyntaxException ex) {
        // Associated volume / HA map URIs come from the database and should always parse;
        // log at error (not debug) so a corrupt URI is not silently swallowed.
        s_logger.error("URISyntaxException", ex);
    }
}
Usage of com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper in project coprhd-controller (by CoprHD):
class VPlexBlockServiceApiImpl, method createBackendVolumeMigrationDescriptors.
/**
 * Does the work necessary to prepare the passed backend volume for the
 * passed virtual volume to be migrated to a new volume with a new VirtualPool.
 *
 * @param vplexSystem A reference to the Vplex storage system.
 * @param virtualVolume A reference to the virtual volume.
 * @param sourceVolume A reference to the backend volume to be migrated, or
 *            null when the VPLEX volume was ingested and the backend volume is unknown.
 * @param varray A reference to the varray for the backend volume.
 * @param vpool A reference to the VirtualPool for the new volume.
 * @param capacity The capacity for the migration target.
 * @param taskId The task identifier.
 * @param recommendations Pre-computed placement recommendations; when null or
 *            empty, placement is computed here.
 * @param isHA true when creating descriptors for the HA side of a distributed volume.
 * @param capabilities The vpool capabilities, or null to build a default wrapper.
 * @return The descriptors for the migration target volume and the migration itself.
 */
private List<VolumeDescriptor> createBackendVolumeMigrationDescriptors(StorageSystem vplexSystem, Volume virtualVolume, Volume sourceVolume, VirtualArray varray, VirtualPool vpool, Long capacity, String taskId, List<Recommendation> recommendations, boolean isHA, VirtualPoolCapabilityValuesWrapper capabilities) {
    // If we know the backend source volume, the new backend volume
    // will have the same label and project. Otherwise, the volume
    // must be ingested and we know nothing about the backend volume.
    // Therefore, we create the label based on the name of the VPLEX
    // volume and determine the project in a manner similar to a
    // volume creation.
    URI sourceVolumeURI = null;
    Project targetProject = null;
    String targetLabel = null;
    if (sourceVolume != null) {
        // Since we know the source volume, this is not an ingested
        // VPLEX volume that is being migrated. Ideally we would just
        // give the new backend volume the same name as the current
        // i.e., source. However, this is a problem if the migration
        // is on the same array. We can't have two volumes with the
        // same name. Eventually the source will go away, but not until
        // after the migration is completed. The backend volume names
        // are basically irrelevant, but we still want them tied to
        // the VPLEX volume name.
        //
        // When initially created, the names are <vvolname>-0 or
        // <vvolname>-1, depending upon if it is the source side
        // backend volume or HA side backend volume. The volume may
        // also have an additional suffix of "-<1...N>" if the
        // VPLEX volume was created as part of a multi-volume creation
        // request, where N was the number of volumes requested. When
        // a volume is first migrated, we will append a "m" to the current
        // source volume name to ensure name uniqueness. If the volume
        // happens to be migrated again, we'll remove the extra character.
        // We'll go back forth in this manner for each migration of that
        // backend volume.
        sourceVolumeURI = sourceVolume.getId();
        targetProject = _dbClient.queryObject(Project.class, sourceVolume.getProject().getURI());
        targetLabel = sourceVolume.getLabel();
        if (!targetLabel.endsWith(MIGRATION_LABEL_SUFFIX)) {
            targetLabel += MIGRATION_LABEL_SUFFIX;
        } else {
            targetLabel = targetLabel.substring(0, targetLabel.length() - 1);
        }
    } else {
        // Ingested volume: derive project and label from the VPLEX volume,
        // with a suffix indicating source side vs. HA side.
        targetProject = getVplexProject(vplexSystem, _dbClient, _tenantsService);
        targetLabel = virtualVolume.getLabel();
        if (virtualVolume.getVirtualArray().equals(varray.getId())) {
            targetLabel += SRC_BACKEND_VOL_LABEL_SUFFIX;
        } else {
            targetLabel += HA_BACKEND_VOL_LABEL_SUFFIX;
        }
    }
    // Get the recommendation for this volume placement.
    Set<URI> requestedVPlexSystems = new HashSet<URI>();
    requestedVPlexSystems.add(vplexSystem.getId());
    URI cgURI = null;
    // Check to see if the VirtualPoolCapabilityValuesWrapper has been passed in; if not, create a new one.
    if (capabilities != null) {
        // The consistency group, or null when not specified.
        final BlockConsistencyGroup consistencyGroup = capabilities.getBlockConsistencyGroup() == null ? null : _dbClient.queryObject(BlockConsistencyGroup.class, capabilities.getBlockConsistencyGroup());
        // Only carry the CG forward when it has not yet been created on the array, or is a
        // LOCAL type CG; in other cases we don't want a volume creation to result in backend CGs.
        if ((consistencyGroup != null) && ((!consistencyGroup.created()) || (consistencyGroup.getTypes().contains(Types.LOCAL.toString())))) {
            cgURI = consistencyGroup.getId();
        }
    } else {
        capabilities = new VirtualPoolCapabilityValuesWrapper();
        capabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, capacity);
        // Integer.valueOf over the deprecated Integer constructor.
        capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, Integer.valueOf(1));
    }
    boolean premadeRecs = false;
    if (recommendations == null || recommendations.isEmpty()) {
        recommendations = getBlockScheduler().scheduleStorage(varray, requestedVPlexSystems, null, vpool, false, null, null, capabilities, targetProject, VpoolUse.ROOT, new HashMap<VpoolUse, List<Recommendation>>());
        if (recommendations.isEmpty()) {
            throw APIException.badRequests.noStorageFoundForVolumeMigration(vpool.getLabel(), varray.getLabel(), sourceVolumeURI);
        }
        s_logger.info("Got recommendation");
    } else {
        premadeRecs = true;
    }
    // If we have premade recommendations passed in and this is trying to create descriptors for HA
    // then the HA rec will be at index 1 instead of index 0. Default case is index 0.
    int recIndex = (premadeRecs && isHA) ? 1 : 0;
    // Create a volume for the new backend volume to which
    // data will be migrated.
    URI targetStorageSystem = recommendations.get(recIndex).getSourceStorageSystem();
    URI targetStoragePool = recommendations.get(recIndex).getSourceStoragePool();
    Volume targetVolume = prepareVolumeForRequest(capacity, targetProject, varray, vpool, targetStorageSystem, targetStoragePool, targetLabel, ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME, taskId, _dbClient);
    // If the cgURI is null, try and get it from the source volume.
    if (cgURI == null) {
        if ((sourceVolume != null) && (!NullColumnValueGetter.isNullURI(sourceVolume.getConsistencyGroup()))) {
            cgURI = sourceVolume.getConsistencyGroup();
            targetVolume.setConsistencyGroup(cgURI);
        }
    }
    // Preserve the replication group so the target lands in the same group as the source.
    if ((sourceVolume != null) && NullColumnValueGetter.isNotNullValue(sourceVolume.getReplicationGroupInstance())) {
        targetVolume.setReplicationGroupInstance(sourceVolume.getReplicationGroupInstance());
    }
    targetVolume.addInternalFlags(Flag.INTERNAL_OBJECT);
    _dbClient.updateObject(targetVolume);
    s_logger.info("Prepared volume {}", targetVolume.getId());
    // Build the descriptors for the target volume and the migration.
    URI targetVolumeURI = targetVolume.getId();
    List<VolumeDescriptor> descriptors = new ArrayList<VolumeDescriptor>();
    descriptors.add(new VolumeDescriptor(VolumeDescriptor.Type.BLOCK_DATA, targetStorageSystem, targetVolumeURI, targetStoragePool, cgURI, capabilities, capacity));
    // Create a migration to represent the migration of data
    // from the backend volume to the new backend volume for the
    // passed virtual volume and add the migration to the passed
    // migrations list.
    Migration migration = prepareMigration(virtualVolume.getId(), sourceVolumeURI, targetVolumeURI, taskId);
    descriptors.add(new VolumeDescriptor(VolumeDescriptor.Type.VPLEX_MIGRATE_VOLUME, targetStorageSystem, targetVolumeURI, targetStoragePool, cgURI, migration.getId(), capabilities));
    printMigrationInfo(migration, sourceVolume, targetVolume);
    return descriptors;
}
Usage of com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper in project coprhd-controller (by CoprHD):
class VPlexBlockServiceApiImpl, method prepareBackendVolumeForMigration.
/**
 * Deprecated, need to start using createBackendVolumeMigrationDescriptors for use in
 * BlockOrchestration.
 *
 * Does the work necessary to prepare the passed backend volume for the
 * passed virtual volume to be migrated to a new volume with a new VirtualPool.
 *
 * @param vplexSystem A reference to the Vplex storage system.
 * @param virtualVolume A reference to the virtual volume.
 * @param sourceVolume A reference to the backend volume to be migrated, or
 *            null when the VPLEX volume was ingested and the backend volume is unknown.
 * @param varray A reference to the varray for the backend volume.
 * @param vpool A reference to the VirtualPool for the new volume.
 * @param capacity The capacity for the migration target.
 * @param taskId The task identifier.
 * @param newVolumes An OUT parameter to which the new volume is added.
 * @param migrationMap An OUT parameter to which the new migration is added.
 * @param poolVolumeMap An OUT parameter associating the new Volume to the
 *            storage pool in which it will be created.
 */
@Deprecated
private void prepareBackendVolumeForMigration(StorageSystem vplexSystem, Volume virtualVolume, Volume sourceVolume, VirtualArray varray, VirtualPool vpool, Long capacity, String taskId, List<URI> newVolumes, Map<URI, URI> migrationMap, Map<URI, URI> poolVolumeMap) {
    URI sourceVolumeURI = null;
    Project targetProject = null;
    String targetLabel = null;
    if (sourceVolume != null) {
        // Since we know the source volume, this is not an ingested
        // VPLEX volume that is being migrated. Ideally we would just
        // give the new backend volume the same name as the current
        // i.e., source. However, this is a problem if the migration
        // is on the same array. We can't have two volumes with the
        // same name. Eventually the source will go away, but not until
        // after the migration is completed. The backend volume names
        // are basically irrelevant, but we still want them tied to
        // the VPLEX volume name.
        //
        // When initially created, the names are <vvolname>-0 or
        // <vvolname>-1, depending upon if it is the source side
        // backend volume or HA side backend volume. The volume may
        // also have an additional suffix of "-<1...N>" if the
        // VPLEX volume was created as part of a multi-volume creation
        // request, where N was the number of volumes requested. When
        // a volume is first migrated, we will append a "m" to the current
        // source volume name to ensure name uniqueness. If the volume
        // happens to be migrated again, we'll remove the extra character.
        // We'll go back forth in this manner for each migration of that
        // backend volume.
        sourceVolumeURI = sourceVolume.getId();
        targetProject = _dbClient.queryObject(Project.class, sourceVolume.getProject().getURI());
        targetLabel = sourceVolume.getLabel();
        if (!targetLabel.endsWith(MIGRATION_LABEL_SUFFIX)) {
            targetLabel += MIGRATION_LABEL_SUFFIX;
        } else {
            targetLabel = targetLabel.substring(0, targetLabel.length() - 1);
        }
    } else {
        // The VPLEX volume must be ingested and now the backend
        // volume(s) are being migrated. We have no idea what the
        // source volume name is. Therefore, we can just give
        // them initial extensions. It is highly unlikely that
        // they will have the same naming conventions for their
        // backend volumes.
        targetProject = getVplexProject(vplexSystem, _dbClient, _tenantsService);
        targetLabel = virtualVolume.getLabel();
        if (virtualVolume.getVirtualArray().equals(varray.getId())) {
            targetLabel += SRC_BACKEND_VOL_LABEL_SUFFIX;
        } else {
            targetLabel += HA_BACKEND_VOL_LABEL_SUFFIX;
        }
    }
    // Get the recommendation for this volume placement.
    URI cgURI = null;
    // BUG FIX: guard against an NPE -- sourceVolume may be null for ingested VPLEX
    // volumes, which the else branch above explicitly supports.
    if ((sourceVolume != null) && !NullColumnValueGetter.isNullURI(sourceVolume.getConsistencyGroup())) {
        cgURI = sourceVolume.getConsistencyGroup();
    }
    Set<URI> requestedVPlexSystems = new HashSet<URI>();
    requestedVPlexSystems.add(vplexSystem.getId());
    VirtualPoolCapabilityValuesWrapper cosWrapper = new VirtualPoolCapabilityValuesWrapper();
    cosWrapper.put(VirtualPoolCapabilityValuesWrapper.SIZE, capacity);
    // Integer.valueOf over the deprecated Integer constructor.
    cosWrapper.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, Integer.valueOf(1));
    if (cgURI != null) {
        cosWrapper.put(VirtualPoolCapabilityValuesWrapper.BLOCK_CONSISTENCY_GROUP, cgURI);
    }
    List<Recommendation> recommendations = getBlockScheduler().scheduleStorage(varray, requestedVPlexSystems, null, vpool, false, null, null, cosWrapper, targetProject, VpoolUse.ROOT, new HashMap<VpoolUse, List<Recommendation>>());
    if (recommendations.isEmpty()) {
        throw APIException.badRequests.noStorageFoundForVolumeMigration(vpool.getLabel(), varray.getLabel(), sourceVolumeURI);
    }
    s_logger.info("Got recommendation");
    // Create a volume for the new backend volume to which
    // data will be migrated.
    URI targetStorageSystem = recommendations.get(0).getSourceStorageSystem();
    URI targetStoragePool = recommendations.get(0).getSourceStoragePool();
    Volume targetVolume = prepareVolumeForRequest(capacity, targetProject, varray, vpool, targetStorageSystem, targetStoragePool, targetLabel, ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME, taskId, _dbClient);
    if (cgURI != null) {
        targetVolume.setConsistencyGroup(cgURI);
    }
    targetVolume.addInternalFlags(Flag.INTERNAL_OBJECT);
    _dbClient.updateObject(targetVolume);
    s_logger.info("Prepared volume {}", targetVolume.getId());
    // Add the volume to the passed new volumes list and pool
    // volume map.
    URI targetVolumeURI = targetVolume.getId();
    newVolumes.add(targetVolumeURI);
    poolVolumeMap.put(targetStoragePool, targetVolumeURI);
    // Create a migration to represent the migration of data
    // from the backend volume to the new backend volume for the
    // passed virtual volume and add the migration to the passed
    // migrations list.
    Migration migration = prepareMigration(virtualVolume.getId(), sourceVolumeURI, targetVolumeURI, taskId);
    migrationMap.put(targetVolumeURI, migration.getId());
    s_logger.info("Prepared migration {}.", migration.getId());
}
Usage of com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper in project coprhd-controller (by CoprHD):
class RPBlockServiceApiImpl, method upgradeToProtectedVolume.
/**
 * Upgrade a local block volume to a RecoverPoint-protected volume.
 *
 * @param changeVpoolVolume the existing volume being protected.
 * @param newVpool the requested virtual pool.
 * @param vpoolChangeParam Param sent down by the API Service.
 * @param taskId the task identifier.
 * @throws InternalException
 */
private void upgradeToProtectedVolume(Volume changeVpoolVolume, VirtualPool newVpool, VirtualPoolChangeParam vpoolChangeParam, String taskId) throws InternalException {
    Project volumeProject = _dbClient.queryObject(Project.class, changeVpoolVolume.getProject());
    if (VirtualPool.vPoolSpecifiesProtection(newVpool)) {
        // The volume cannot already belong to a consistency group; the user must first
        // remove the volume from the existing CG before they can proceed.
        URI existingCgURI = changeVpoolVolume.getConsistencyGroup();
        if (!NullColumnValueGetter.isNullURI(existingCgURI)) {
            BlockConsistencyGroup existingCg = _dbClient.queryObject(BlockConsistencyGroup.class, existingCgURI);
            throw APIException.badRequests.cannotCreateRPVolumesInCG(changeVpoolVolume.getLabel(), existingCg.getLabel());
        }
        // The user needs to specify a CG for this operation.
        if (vpoolChangeParam.getConsistencyGroup() == null) {
            throw APIException.badRequests.addRecoverPointProtectionRequiresCG();
        }
        // Protection cannot be added while snapshots of the volume exist.
        if (!CollectionUtils.isEmpty(getSnapshotsForVolume(changeVpoolVolume))) {
            throw APIException.badRequests.cannotAddProtectionWhenSnapshotsExist(changeVpoolVolume.getLabel());
        }
    }
    // Build the capabilities describing the protected volume to be created.
    VirtualPoolCapabilityValuesWrapper cosCapabilities = new VirtualPoolCapabilityValuesWrapper();
    cosCapabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, changeVpoolVolume.getCapacity());
    cosCapabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, 1);
    cosCapabilities.put(VirtualPoolCapabilityValuesWrapper.BLOCK_CONSISTENCY_GROUP, vpoolChangeParam.getConsistencyGroup());
    cosCapabilities.put(VirtualPoolCapabilityValuesWrapper.CHANGE_VPOOL_VOLUME, changeVpoolVolume.getId().toString());
    List<Recommendation> placementRecs = getRecommendationsForVirtualPoolChangeRequest(changeVpoolVolume, newVpool, vpoolChangeParam, cosCapabilities);
    if (placementRecs.isEmpty()) {
        throw APIException.badRequests.noStorageFoundForVolume();
    }
    // Get the volume's varray.
    VirtualArray volumeVarray = _dbClient.queryObject(VirtualArray.class, changeVpoolVolume.getVirtualArray());
    // Generate a VolumeCreate object that contains the information that createVolumes likes to consume.
    VolumeCreate createParam = new VolumeCreate(changeVpoolVolume.getLabel(), String.valueOf(changeVpoolVolume.getCapacity()), 1, newVpool.getId(), changeVpoolVolume.getVirtualArray(), changeVpoolVolume.getProject().getURI());
    TaskList tasks = new TaskList();
    createTaskForVolume(changeVpoolVolume, ResourceOperationTypeEnum.CHANGE_BLOCK_VOLUME_VPOOL, tasks, taskId);
    Map<VpoolUse, List<Recommendation>> recsByUse = new HashMap<VpoolUse, List<Recommendation>>();
    recsByUse.put(VpoolUse.ROOT, placementRecs);
    createVolumes(createParam, volumeProject, volumeVarray, newVpool, recsByUse, tasks, taskId, cosCapabilities);
}
Usage of com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper in project coprhd-controller (by CoprHD):
class SRDFBlockServiceApiImpl, method upgradeToSRDFTargetVolume.
/**
 * Upgrade a local block volume to a protected SRDF volume.
 *
 * @param volume -- srdf source volume (existing).
 * @param vpool -- Requested vpool.
 * @param cosChangeParam -- The virtual pool change request parameters.
 * @param taskId -- The task identifier.
 * @return The volume descriptors for the volumes created for SRDF protection.
 * @throws InternalException
 */
private List<VolumeDescriptor> upgradeToSRDFTargetVolume(final Volume volume, final VirtualPool vpool, final VirtualPoolChangeParam cosChangeParam, final String taskId) throws InternalException {
    VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
    capabilities.put(VirtualPoolCapabilityValuesWrapper.BLOCK_CONSISTENCY_GROUP, volume.getConsistencyGroup());
    List<Recommendation> recommendations = getRecommendationsForVirtualPoolChangeRequest(volume, vpool, cosChangeParam);
    if (recommendations.isEmpty()) {
        throw APIException.badRequests.noStorageFoundForVolume();
    }
    // Call out to the respective block service implementation to prepare and create the
    // volumes based on the recommendations.
    Project project = _dbClient.queryObject(Project.class, volume.getProject());
    VirtualArray varray = _dbClient.queryObject(VirtualArray.class, volume.getVirtualArray());
    // Generate a VolumeCreate object that contains the information that createVolumes likes to
    // consume.
    VolumeCreate param = new VolumeCreate(volume.getLabel(), String.valueOf(volume.getCapacity()), 1, vpool.getId(), volume.getVirtualArray(), volume.getProject().getURI());
    // Integer.valueOf over the deprecated Integer constructor.
    capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, Integer.valueOf(1));
    if (volume.getIsComposite()) {
        // add meta volume properties to the capabilities instance
        capabilities.put(VirtualPoolCapabilityValuesWrapper.IS_META_VOLUME, volume.getIsComposite());
        capabilities.put(VirtualPoolCapabilityValuesWrapper.META_VOLUME_TYPE, volume.getCompositionType());
        capabilities.put(VirtualPoolCapabilityValuesWrapper.META_VOLUME_MEMBER_COUNT, volume.getMetaMemberCount());
        capabilities.put(VirtualPoolCapabilityValuesWrapper.META_VOLUME_MEMBER_SIZE, volume.getMetaMemberSize());
        _log.debug(String.format("Capabilities : isMeta: %s, Meta Type: %s, Member size: %s, Count: %s", capabilities.getIsMetaVolume(), capabilities.getMetaVolumeType(), capabilities.getMetaVolumeMemberSize(), capabilities.getMetaVolumeMemberCount()));
    }
    TaskList taskList = new TaskList();
    // Prepare the Bourne Volumes to be created and associated
    // with the actual storage system volumes created. Also create
    // a BlockTaskList containing the list of task resources to be
    // returned for the purpose of monitoring the volume creation
    // operation for each volume to be created.
    String volumeLabel = param.getName();
    final BlockConsistencyGroup consistencyGroup = capabilities.getBlockConsistencyGroup() == null ? null : _dbClient.queryObject(BlockConsistencyGroup.class, capabilities.getBlockConsistencyGroup());
    // prepare the volumes
    List<URI> volumeURIs = prepareRecommendedVolumes(taskId, taskList, project, varray, vpool, capabilities.getResourceCount(), recommendations, consistencyGroup, volumeLabel, param.getSize());
    List<VolumeDescriptor> resultListVolumeDescriptors = new ArrayList<>();
    // Execute the volume creations requests for each recommendation.
    Iterator<Recommendation> recommendationsIter = recommendations.iterator();
    while (recommendationsIter.hasNext()) {
        Recommendation recommendation = recommendationsIter.next();
        try {
            List<VolumeDescriptor> volumeDescriptors = createVolumeDescriptors((SRDFRecommendation) recommendation, volumeURIs, capabilities);
            // Log volume descriptor information
            logVolumeDescriptorPrecreateInfo(volumeDescriptors, taskId);
            resultListVolumeDescriptors.addAll(volumeDescriptors);
        } catch (InternalException e) {
            if (_log.isErrorEnabled()) {
                _log.error("Controller error", e);
            }
            String errorMsg = String.format("Controller error: %s", e.getMessage());
            if (volumeURIs != null) {
                // Fail the task for every prepared volume so its status is visible.
                for (URI volumeURI : volumeURIs) {
                    Volume volume1 = _dbClient.queryObject(Volume.class, volumeURI);
                    if (volume1 != null) {
                        Operation op = new Operation();
                        // NOTE(review): this uses the RP volume-create error code in the SRDF
                        // path (likely copy-paste); confirm whether an SRDF-specific code exists.
                        ServiceCoded coded = ServiceError.buildServiceError(ServiceCode.API_RP_VOLUME_CREATE_ERROR, errorMsg);
                        op.setMessage(errorMsg);
                        op.error(coded);
                        _dbClient.createTaskOpStatus(Volume.class, volumeURI, taskId, op);
                        TaskResourceRep volumeTask = toTask(volume1, taskId, op);
                        // Only surface tasks for SOURCE-personality volumes to tell
                        // the user what succeeded and what failed.
                        if (volume1.getPersonality() != null && volume1.getPersonality().equals(Volume.PersonalityTypes.SOURCE.toString())) {
                            taskList.getTaskList().add(volumeTask);
                        }
                    }
                }
            }
            throw APIException.badRequests.cannotCreateSRDFVolumes(e);
        }
    }
    return resultListVolumeDescriptors;
}
Aggregations