use of com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper in project coprhd-controller by CoprHD.
the class SRDFBlockServiceApiImpl method upgradeToTargetVolume.
/**
* Upgrade a local block volume to a protected SRDF volume
*
* @param volume
* -- Existing source block volume to be upgraded.
* @param vpool
* -- Requested vpool.
* @param cosChangeParam
* -- Virtual pool change parameters.
* @param taskId
* -- Task identifier.
* @throws InternalException
*/
private void upgradeToTargetVolume(final Volume volume, final VirtualPool vpool, final VirtualPoolChangeParam cosChangeParam, final String taskId) throws InternalException {
VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
capabilities.put(VirtualPoolCapabilityValuesWrapper.BLOCK_CONSISTENCY_GROUP, volume.getConsistencyGroup());
List<Recommendation> recommendations = getRecommendationsForVirtualPoolChangeRequest(volume, vpool, cosChangeParam);
if (recommendations.isEmpty()) {
throw APIException.badRequests.noStorageFoundForVolume();
}
// Call out to the respective block service implementation to prepare and create the
// volumes based on the recommendations.
Project project = _dbClient.queryObject(Project.class, volume.getProject());
VirtualArray varray = _dbClient.queryObject(VirtualArray.class, volume.getVirtualArray());
// Generate a VolumeCreate object that contains the information that createVolumes likes to
// consume.
VolumeCreate param = new VolumeCreate(volume.getLabel(), String.valueOf(volume.getCapacity()), 1, vpool.getId(), volume.getVirtualArray(), volume.getProject().getURI());
capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, new Integer(1));
if (volume.getIsComposite()) {
// add meta volume properties to the capabilities instance
capabilities.put(VirtualPoolCapabilityValuesWrapper.IS_META_VOLUME, volume.getIsComposite());
capabilities.put(VirtualPoolCapabilityValuesWrapper.META_VOLUME_TYPE, volume.getCompositionType());
capabilities.put(VirtualPoolCapabilityValuesWrapper.META_VOLUME_MEMBER_COUNT, volume.getMetaMemberCount());
capabilities.put(VirtualPoolCapabilityValuesWrapper.META_VOLUME_MEMBER_SIZE, volume.getMetaMemberSize());
_log.debug(String.format("Capabilities : isMeta: %s, Meta Type: %s, Member size: %s, Count: %s", capabilities.getIsMetaVolume(), capabilities.getMetaVolumeType(), capabilities.getMetaVolumeMemberSize(), capabilities.getMetaVolumeMemberCount()));
}
Map<VpoolUse, List<Recommendation>> recommendationMap = new HashMap<VpoolUse, List<Recommendation>>();
recommendationMap.put(VpoolUse.ROOT, recommendations);
createVolumes(param, project, varray, vpool, recommendationMap, null, taskId, capabilities);
}
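As the snippet above suggests, the wrapper is essentially a typed map keyed by its string constants. A minimal sketch of that round trip follows (a hypothetical helper, not project code; the getResourceCount()/getSize() getters are assumed by analogy with the getIsMetaVolume()/getMetaVolumeType() calls seen in the debug log above):
// Hypothetical helper sketch, not taken from the project source.
private VirtualPoolCapabilityValuesWrapper buildBasicCapabilities(Volume volume) {
    VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
    capabilities.put(VirtualPoolCapabilityValuesWrapper.BLOCK_CONSISTENCY_GROUP, volume.getConsistencyGroup());
    capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, new Integer(1));
    capabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, volume.getCapacity());
    // Typed getters mirror the constants (assumed; verify against the class before relying on them).
    _log.debug("resource count={}, size={}", capabilities.getResourceCount(), capabilities.getSize());
    return capabilities;
}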
use of com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper in project coprhd-controller by CoprHD.
the class FileService method createContinuousCopies.
/**
* Create Continuous Copies
*
* @param id
* the URN of a ViPR fileSystem
* @param param
* File Replication Create parameter
* @brief Define continuous copies
* @return TaskResponse
* @throws InternalException
* @throws APIException
*/
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/{id}/protection/continuous-copies/create")
@CheckPermission(roles = { Role.TENANT_ADMIN }, acls = { ACL.OWN, ACL.ALL })
public TaskResourceRep createContinuousCopies(@PathParam("id") URI id, FileReplicationCreateParam param) throws InternalException, APIException {
_log.info("Request to create replication copies for filesystem {}", id);
// Validate the FS id.
ArgValidator.checkFieldUriType(id, FileShare.class, "id");
FileShare fs = queryResource(id);
FileShare orgFs = queryResource(id);
String task = UUID.randomUUID().toString();
ArgValidator.checkEntity(fs, id, isIdEmbeddedInURL(id));
TaskList taskList = new TaskList();
// Make sure that we don't have some pending
// operation against the file system!!!
checkForPendingTasks(Arrays.asList(fs.getTenant().getURI()), Arrays.asList(fs));
// Get the project.
URI projectURI = fs.getProject().getURI();
Project project = _permissionsHelper.getObjectById(projectURI, Project.class);
ArgValidator.checkEntity(project, projectURI, false);
_log.info("Found filesystem project {}", projectURI);
VirtualPool currentVpool = _dbClient.queryObject(VirtualPool.class, fs.getVirtualPool());
StringBuffer notSuppReasonBuff = new StringBuffer();
// Verify the file system and its vPool are capable of doing replication!!!
if (!FileSystemReplicationUtils.isSupportedFileReplicationCreate(fs, currentVpool, notSuppReasonBuff)) {
_log.error("create mirror copies is not supported for file system {} due to {}", fs.getId().toString(), notSuppReasonBuff.toString());
throw APIException.badRequests.unableToCreateMirrorCopies(fs.getId(), notSuppReasonBuff.toString());
}
// Get the virtual array!!!
VirtualArray varray = _dbClient.queryObject(VirtualArray.class, fs.getVirtualArray());
// New operation
Operation op = new Operation();
op.setResourceType(ResourceOperationTypeEnum.CREATE_FILE_SYSTEM_MIRROR_COPIES);
op.setDescription("Create file system mirror operation");
op = _dbClient.createTaskOpStatus(FileShare.class, fs.getId(), task, op);
TaskResourceRep fileSystemTask = toTask(fs, task, op);
taskList.getTaskList().add(fileSystemTask);
StorageSystem device = _dbClient.queryObject(StorageSystem.class, fs.getStorageDevice());
// prepare vpool capability values
VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
capabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, fs.getCapacity());
capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, new Integer(1));
if (VirtualPool.ProvisioningType.Thin.toString().equalsIgnoreCase(currentVpool.getSupportedProvisioningType())) {
capabilities.put(VirtualPoolCapabilityValuesWrapper.THIN_PROVISIONING, Boolean.TRUE);
}
// Set the source file system details
// source fs details used in finding recommendations for target fs!!
capabilities.put(VirtualPoolCapabilityValuesWrapper.FILE_SYSTEM_CREATE_MIRROR_COPY, Boolean.TRUE);
capabilities.put(VirtualPoolCapabilityValuesWrapper.EXISTING_SOURCE_FILE_SYSTEM, fs);
capabilities.put(VirtualPoolCapabilityValuesWrapper.SOURCE_STORAGE_SYSTEM, device);
StringBuilder errorMsg = new StringBuilder();
if (!FilePolicyServiceUtils.updatePolicyCapabilities(_dbClient, varray, currentVpool, project, null, capabilities, errorMsg)) {
_log.error("File system can not be created, ", errorMsg.toString());
throw APIException.badRequests.unableToProcessRequest(errorMsg.toString());
}
if (param.getCopyName() != null && !param.getCopyName().isEmpty()) {
// Use the caller-supplied copy name rather than generating one.
// Strip special characters (e.g. ; / - + ! @ # $ % ^ & ( ) " : [ ] { } \ |) but keep digits, letters and underscores.
String convertedName = param.getCopyName().replaceAll("[^\\dA-Za-z\\_]", "");
_log.info("Original copy name {} and converted copy name {}", param.getCopyName(), convertedName);
capabilities.put(VirtualPoolCapabilityValuesWrapper.FILE_TARGET_COPY_NAME, convertedName);
}
FileServiceApi fileServiceApi = getFileShareServiceImpl(capabilities, _dbClient);
try {
// Call out placementManager to get the recommendation for placement.
List recommendations = _filePlacementManager.getRecommendationsForFileCreateRequest(varray, project, currentVpool, capabilities);
// Verify the source virtual pool recommendations meets source fs storage!!!
fileServiceApi.createTargetsForExistingSource(fs, project, currentVpool, varray, taskList, task, recommendations, capabilities);
} catch (BadRequestException e) {
// Revert the file system to original state!!!
restoreFromOriginalFs(orgFs, fs);
_dbClient.updateObject(fs);
op = _dbClient.error(FileShare.class, fs.getId(), task, e);
_log.error("Create file system mirror copy failed {}, {}", e.getMessage(), e);
throw e;
} catch (InternalException e) {
// Revert the file system to original state!!!
restoreFromOriginalFs(orgFs, fs);
_dbClient.updateObject(fs);
op = _dbClient.error(FileShare.class, fs.getId(), task, e);
_log.error("Create file system mirror copy failed {}, {}", e.getMessage(), e);
throw e;
} catch (Exception e) {
_log.error("Create file system mirror copy failed {}, {}", e.getMessage(), e);
throw APIException.badRequests.unableToProcessRequest(e.getMessage());
}
auditOp(OperationTypeEnum.CREATE_MIRROR_FILE_SYSTEM, true, AuditLogManager.AUDITOP_BEGIN, fs.getLabel(), currentVpool.getLabel(), fs.getLabel(), project == null ? null : project.getId().toString());
return taskList.getTaskList().get(0);
}
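The copy-name handling above relies on a single replaceAll; a standalone illustration (not from the project) shows what the regex keeps and drops:
// Standalone illustration of the copy-name sanitization used above:
// everything except digits, ASCII letters and underscore is removed.
String copyName = "DR-Copy #1_east!";
String convertedName = copyName.replaceAll("[^\\dA-Za-z\\_]", "");
// convertedName is now "DRCopy1_east"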
use of com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper in project coprhd-controller by CoprHD.
the class FileService method assignFileReplicationPolicyToFS.
private TaskResourceRep assignFileReplicationPolicyToFS(FileShare fs, FilePolicy filePolicy, FilePolicyFileSystemAssignParam param, String task) {
StringBuffer notSuppReasonBuff = new StringBuffer();
// Verify the fs has replication attributes!!!
if (fs.getPersonality() != null && PersonalityTypes.SOURCE.name().equalsIgnoreCase(fs.getPersonality()) && fs.getMirrorfsTargets() != null && !fs.getMirrorfsTargets().isEmpty()) {
notSuppReasonBuff.append(String.format("File system %s has active target file systems", fs.getLabel()));
_log.error(notSuppReasonBuff.toString());
throw APIException.badRequests.unableToProcessRequest(notSuppReasonBuff.toString());
}
// File system should not be the target file system..
if (fs.getPersonality() != null && fs.getPersonality().equalsIgnoreCase(PersonalityTypes.TARGET.name())) {
notSuppReasonBuff.append(String.format("File system - %s given in request is an active Target file system.", fs.getLabel()));
_log.error(notSuppReasonBuff.toString());
throw APIException.badRequests.unableToProcessRequest(notSuppReasonBuff.toString());
}
ArgValidator.checkFieldNotNull(param.getTargetVArrays(), "target_varrays");
Set<URI> targertVarrayURIs = param.getTargetVArrays();
for (URI targertVarrayURI : targertVarrayURIs) {
ArgValidator.checkFieldUriType(targertVarrayURI, VirtualArray.class, "target_varray");
VirtualArray targetVarray = _permissionsHelper.getObjectById(targertVarrayURI, VirtualArray.class);
ArgValidator.checkEntity(targetVarray, targertVarrayURI, false);
}
VirtualArray sourceVarray = _dbClient.queryObject(VirtualArray.class, fs.getVirtualArray());
// Get the project.
URI projectURI = fs.getProject().getURI();
Project project = _permissionsHelper.getObjectById(projectURI, Project.class);
VirtualPool vpool = _dbClient.queryObject(VirtualPool.class, fs.getVirtualPool());
// New operation
TaskList taskList = new TaskList();
Operation op = _dbClient.createTaskOpStatus(FileShare.class, fs.getId(), task, ResourceOperationTypeEnum.ASSIGN_FILE_POLICY_TO_FILE_SYSTEM);
op.setDescription("assign file policy to file system");
// As the action done by tenant admin
// Set current tenant as task's tenant!!!
Task taskObj = op.getTask(fs.getId());
FilePolicyServiceUtils.updateTaskTenant(_dbClient, filePolicy, "assign", taskObj, fs.getTenant().getURI());
TaskResourceRep fileShareTask = toTask(fs, task, op);
taskList.getTaskList().add(fileShareTask);
StorageSystem device = _dbClient.queryObject(StorageSystem.class, fs.getStorageDevice());
// prepare vpool capability values
VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
capabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, fs.getCapacity());
capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, new Integer(1));
if (VirtualPool.ProvisioningType.Thin.toString().equalsIgnoreCase(vpool.getSupportedProvisioningType())) {
capabilities.put(VirtualPoolCapabilityValuesWrapper.THIN_PROVISIONING, Boolean.TRUE);
}
// Set the source file system details
// source fs details used in finding recommendations for target fs!!
capabilities.put(VirtualPoolCapabilityValuesWrapper.FILE_SYSTEM_CREATE_MIRROR_COPY, Boolean.TRUE);
capabilities.put(VirtualPoolCapabilityValuesWrapper.EXISTING_SOURCE_FILE_SYSTEM, fs);
capabilities.put(VirtualPoolCapabilityValuesWrapper.SOURCE_STORAGE_SYSTEM, device);
capabilities.put(VirtualPoolCapabilityValuesWrapper.FILE_REPLICATION_TYPE, filePolicy.getFileReplicationType());
capabilities.put(VirtualPoolCapabilityValuesWrapper.FILE_REPLICATION_COPY_MODE, filePolicy.getFileReplicationCopyMode());
Set<String> targetVArrys = new HashSet<String>();
if (filePolicy.getFileReplicationType().equalsIgnoreCase(FileReplicationType.REMOTE.name())) {
for (URI targertVarrayURI : targertVarrayURIs) {
targetVArrys.add(targertVarrayURI.toString());
}
} else {
targetVArrys.add(sourceVarray.getId().toString());
}
URI targetvPool = null;
// Get the existing topologies for the policy
if (filePolicy.getReplicationTopologies() != null && !filePolicy.getReplicationTopologies().isEmpty()) {
    for (String strTopology : filePolicy.getReplicationTopologies()) {
        FileReplicationTopology dbTopology = _dbClient.queryObject(FileReplicationTopology.class, URI.create(strTopology));
        Set<String> dbTargetVArrys = new HashSet<String>();
        if (dbTopology != null && sourceVarray.getId().toString().equalsIgnoreCase(dbTopology.getSourceVArray().toString())) {
            dbTargetVArrys.addAll(dbTopology.getTargetVArrays());
            if (dbTargetVArrys.containsAll(targetVArrys)) {
                // Pick up the target vpool recorded in the topology; this is needed for policies
                // which are created from an older-release remote replication vpool.
                for (String targetVarray : targetVArrys) {
                    if (dbTopology.getTargetVAVPool() != null && !dbTopology.getTargetVAVPool().isEmpty()) {
                        String[] vavPool = dbTopology.getTargetVAVPool().split(SEPARATOR);
                        if (vavPool != null && vavPool.length > 1 && targetVarray.equalsIgnoreCase(vavPool[0])) {
                            String strvPool = vavPool[1];
                            VirtualPool vPool = _dbClient.queryObject(VirtualPool.class, URI.create(strvPool));
                            if (vPool != null && !vPool.getInactive()) {
                                targetvPool = vPool.getId();
                            }
                        }
                    }
                }
            }
            break;
        }
    }
}
capabilities.put(VirtualPoolCapabilityValuesWrapper.FILE_REPLICATION_TARGET_VARRAYS, targetVArrys);
if (targetvPool != null) {
capabilities.put(VirtualPoolCapabilityValuesWrapper.FILE_REPLICATION_TARGET_VPOOL, targetvPool);
} else {
capabilities.put(VirtualPoolCapabilityValuesWrapper.FILE_REPLICATION_TARGET_VPOOL, vpool.getId());
}
FileServiceApi fileServiceApi = getFileShareServiceImpl(capabilities, _dbClient);
try {
// Call out placementManager to get the recommendation for placement.
List recommendations = _filePlacementManager.getRecommendationsForFileCreateRequest(sourceVarray, project, vpool, capabilities);
fileServiceApi.assignFilePolicyToFileSystem(fs, filePolicy, project, vpool, sourceVarray, taskList, task, recommendations, capabilities);
} catch (BadRequestException e) {
_dbClient.error(FileShare.class, fs.getId(), task, e);
_log.error("Error Assigning Filesystem policy {}, {}", e.getMessage(), e);
throw e;
} catch (Exception e) {
_log.error("Error Assigning Filesystem policy {}, {}", e.getMessage(), e);
throw APIException.badRequests.unableToProcessRequest(e.getMessage());
}
return fileShareTask;
}
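The nested topology loop above exists mainly to pull a target vpool out of the topology's targetVAVPool string, which is stored as a varray/vpool pair joined by SEPARATOR. A hypothetical helper (a sketch, not project code; the method name and the exact SEPARATOR value are assumptions) makes the intent easier to see:
// Hypothetical helper, not in the source: resolve the target vpool recorded in a
// replication topology as "<targetVarrayId><SEPARATOR><targetVpoolId>".
private URI findTargetVpoolFromTopology(FileReplicationTopology dbTopology, String targetVarray) {
    if (dbTopology.getTargetVAVPool() == null || dbTopology.getTargetVAVPool().isEmpty()) {
        return null;
    }
    String[] vavPool = dbTopology.getTargetVAVPool().split(SEPARATOR);
    if (vavPool.length > 1 && targetVarray.equalsIgnoreCase(vavPool[0])) {
        VirtualPool vPool = _dbClient.queryObject(VirtualPool.class, URI.create(vavPool[1]));
        if (vPool != null && !vPool.getInactive()) {
            return vPool.getId();
        }
    }
    return null;
}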
use of com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper in project coprhd-controller by CoprHD.
the class MigrationService method migrateVolume.
/**
* Performs a non-disruptive migration for the passed VPLEX virtual volume.
* The backend volume of the VPLEX volume that is migrated is the backend
* volume on the passed source storage system. The volume is migrated to the
* passed target storage system, which must be connected to the same VPLEX
* cluster as the source storage system.
*
* @prereq none
*
* @param migrateParam A reference to the migration parameters.
* @deprecated Use the Change Virtual Pool API instead
* @brief Perform a non-disruptive migration for a VPLEX volume.
* @return A TaskResourceRep for the volume being migrated.
* @throws InternalException
*/
@Deprecated
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@CheckPermission(roles = { Role.SYSTEM_ADMIN })
public TaskResourceRep migrateVolume(MigrationParam migrateParam) throws InternalException {
// Create a unique task id.
String taskId = UUID.randomUUID().toString();
s_logger.info("Migrate volume {} from storage system {} to storage system {}", new Object[] { migrateParam.getVolume(), migrateParam.getSrcStorageSystem(), migrateParam.getTgtStorageSystem() });
// Verify the requested volume supports migration.
Volume vplexVolume = verifyRequestedVolumeSupportsMigration(migrateParam.getVolume());
s_logger.debug("Verfified requested volume");
// Make sure that we don't have some pending
// operation against the volume
checkForPendingTasks(Arrays.asList(vplexVolume.getTenant().getURI()), Arrays.asList(vplexVolume));
// Determine the backend volume of the requested VPlex volume that
// is to be migrated. It is the volume on the passed source storage
// system.
Volume migrationSrc = getMigrationSource(vplexVolume, migrateParam.getSrcStorageSystem());
s_logger.debug("Migration source is {}", migrationSrc.getId());
// The project for the migration target will be the same as that
// of the source.
Project migrationTgtProject = _permissionsHelper.getObjectById(migrationSrc.getProject().getURI(), Project.class);
s_logger.debug("Migration target project is {}", migrationTgtProject.getId());
// The VirtualArray for the migration target will be the same as
// that of the source.
VirtualArray migrationTargetVarray = _permissionsHelper.getObjectById(migrationSrc.getVirtualArray(), VirtualArray.class);
s_logger.debug("Migration target VirtualArray is {}", migrationTargetVarray.getId());
// Verify the requested target storage system exists and
// is a system to which the migration source volume can
// be migrated.
verifyTargetStorageSystemForMigration(migrateParam.getVolume(), vplexVolume.getStorageController(), migrateParam.getSrcStorageSystem(), migrateParam.getTgtStorageSystem());
s_logger.debug("Verified target storage system {}", migrateParam.getTgtStorageSystem());
// Get the VirtualPool for the migration target.
VirtualPool migrationTgtCos = getVirtualPoolForMigrationTarget(migrateParam.getVirtualPool(), vplexVolume, migrationSrc);
s_logger.debug("Migration target VirtualPool is {}", migrationTgtCos.getId());
// Get the VPlex storage system for the virtual volume.
URI vplexSystemURI = vplexVolume.getStorageController();
Set<URI> requestedVPlexSystems = new HashSet<URI>();
requestedVPlexSystems.add(vplexSystemURI);
// Get a placement recommendation on the requested target storage
// system connected to the VPlex storage system of the VPlex volume.
VPlexScheduler vplexScheduler = _vplexBlockServiceApi.getBlockScheduler();
VirtualPoolCapabilityValuesWrapper cosWrapper = new VirtualPoolCapabilityValuesWrapper();
cosWrapper.put(VirtualPoolCapabilityValuesWrapper.SIZE, migrationSrc.getCapacity());
cosWrapper.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, new Integer(1));
List<Recommendation> recommendations = vplexScheduler.scheduleStorage(migrationTargetVarray, requestedVPlexSystems, migrateParam.getTgtStorageSystem(), migrationTgtCos, false, null, null, cosWrapper, migrationTgtProject, VpoolUse.ROOT, new HashMap<VpoolUse, List<Recommendation>>());
if (recommendations.isEmpty()) {
throw APIException.badRequests.noStorageFoundForVolumeMigration(migrationTgtCos.getLabel(), migrationTargetVarray.getLabel(), vplexVolume.getId());
}
s_logger.debug("Got recommendation for migration target");
// There should be a single recommendation.
Recommendation recommendation = recommendations.get(0);
URI recommendedSystem = recommendation.getSourceStorageSystem();
URI recommendedPool = recommendation.getSourceStoragePool();
s_logger.debug("Recommendation storage system is {}", recommendedSystem);
s_logger.debug("Recommendation storage pool is {}", recommendedPool);
// Prepare the migration target.
List<URI> migrationTgts = new ArrayList<URI>();
Map<URI, URI> poolTgtMap = new HashMap<URI, URI>();
Long size = _vplexBlockServiceApi.getVolumeCapacity(migrationSrc);
Volume migrationTgt = VPlexBlockServiceApiImpl.prepareVolumeForRequest(size, migrationTgtProject, migrationTargetVarray, migrationTgtCos, recommendedSystem, recommendedPool, migrationSrc.getLabel(), ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME, taskId, _dbClient);
URI migrationTgtURI = migrationTgt.getId();
migrationTgts.add(migrationTgtURI);
poolTgtMap.put(recommendedPool, migrationTgtURI);
s_logger.debug("Prepared migration target volume {}", migrationTgtURI);
// Prepare the migration.
Map<URI, URI> migrationsMap = new HashMap<URI, URI>();
Migration migration = _vplexBlockServiceApi.prepareMigration(migrateParam.getVolume(), migrationSrc.getId(), migrationTgt.getId(), taskId);
migrationsMap.put(migrationTgtURI, migration.getId());
s_logger.debug("Prepared migration {}", migration.getId());
// Create a task for the virtual volume being migrated and set the
// initial task state to pending.
Operation op = _dbClient.createTaskOpStatus(Volume.class, vplexVolume.getId(), taskId, ResourceOperationTypeEnum.MIGRATE_BLOCK_VOLUME);
TaskResourceRep task = toTask(vplexVolume, taskId, op);
s_logger.debug("Created task for volume {}", migrateParam.getVolume());
try {
VPlexController controller = _vplexBlockServiceApi.getController();
String successMsg = String.format("Migration succeeded for volume %s", migrateParam.getVolume());
String failMsg = String.format("Migration failed for volume %s", migrateParam.getVolume());
controller.migrateVolumes(vplexSystemURI, migrateParam.getVolume(), migrationTgts, migrationsMap, poolTgtMap, (migrateParam.getVirtualPool() != null ? migrateParam.getVirtualPool() : null), null, successMsg, failMsg, null, taskId, null);
s_logger.debug("Got VPlex controller and created migration workflow");
} catch (InternalException e) {
s_logger.error("Controller Error", e);
String errMsg = String.format("Controller Error: %s", e.getMessage());
task.setState(Operation.Status.error.name());
task.setMessage(errMsg);
Operation opStatus = new Operation(Operation.Status.error.name(), errMsg);
_dbClient.updateTaskOpStatus(Volume.class, task.getResource().getId(), taskId, opStatus);
migrationTgt.setInactive(true);
_dbClient.persistObject(migrationTgt);
migration.setInactive(true);
_dbClient.persistObject(migration);
throw e;
}
return task;
}
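The catch block above always performs the same cleanup steps when the controller call fails; a hypothetical extraction of that pattern (a sketch, not present in the source) reads:
// Hypothetical helper, not in the source: fail the task and retire the objects that were
// prepared for a migration that never started. All calls mirror the catch block above.
private void cleanupFailedMigration(Volume vplexVolume, Volume migrationTgt, Migration migration,
        String taskId, InternalException e) {
    String errMsg = String.format("Controller Error: %s", e.getMessage());
    Operation opStatus = new Operation(Operation.Status.error.name(), errMsg);
    _dbClient.updateTaskOpStatus(Volume.class, vplexVolume.getId(), taskId, opStatus);
    migrationTgt.setInactive(true);
    _dbClient.persistObject(migrationTgt);
    migration.setInactive(true);
    _dbClient.persistObject(migration);
}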
use of com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper in project coprhd-controller by CoprHD.
the class VolumeService method createVolume.
/**
* The fundamental abstraction in the Block Store is a
* volume. A volume is a unit of block storage capacity that has been
* allocated by a consumer to a project. This API allows the user to
* create one or more volumes. The volumes are created in the same
* storage pool.
*
* NOTE: This is an asynchronous operation.
*
* @prereq none
*
* @param param POST data containing the volume creation information.
*
* @brief Create volume
* @return Details of the newly created volume
* @throws InternalException
*/
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
public Response createVolume(@PathParam("tenant_id") String openstackTenantId, @HeaderParam("X-Cinder-V1-Call") String isV1Call, VolumeCreateRequestGen param, @Context HttpHeaders header) throws InternalException {
// Step 1: Parameter validation
Project project = getCinderHelper().getProject(openstackTenantId, getUserFromContext());
String snapshotId = param.volume.snapshot_id;
String sourceVolId = param.volume.source_volid;
String imageId = param.volume.imageRef;
String consistencygroup_id = param.volume.consistencygroup_id;
String volume_type = param.volume.volume_type;
boolean hasConsistencyGroup = false;
if (project == null) {
if (openstackTenantId != null) {
throw APIException.badRequests.projectWithTagNonexistent(openstackTenantId);
} else {
throw APIException.badRequests.parameterIsNullOrEmpty(PROJECT_TENANTID_NULL);
}
}
URI tenantUri = project.getTenantOrg().getURI();
TenantOrg tenant = _dbClient.queryObject(TenantOrg.class, tenantUri);
if (tenant == null)
throw APIException.notFound.unableToFindUserScopeOfSystem();
_log.debug("Create volume: project = {}, tenant = {}", project.getLabel(), tenant.getLabel());
if (param.volume.size <= 0) {
_log.error("volume size should not be zero or negative ={} ", param.volume.size);
return CinderApiUtils.createErrorResponse(400, "Bad Request : Invalid Volume size");
}
long requestedSize = param.volume.size * GB;
// convert volume type from name to vpool
VirtualPool vpool = getVpool(param.volume.volume_type);
Volume sourceVolume = null;
if (vpool == null) {
if (sourceVolId != null) {
sourceVolume = findVolume(sourceVolId, openstackTenantId);
if (sourceVolume == null) {
_log.error("Invalid Source Volume ID ={} ", sourceVolId);
return CinderApiUtils.createErrorResponse(404, "Not Found : Invalid Source Volume ID " + sourceVolId);
}
vpool = _dbClient.queryObject(VirtualPool.class, sourceVolume.getVirtualPool());
} else {
_log.error("Invalid Volume Type ={} ", volume_type);
return CinderApiUtils.createErrorResponse(404, "Not Found : Invalid Volume Type " + volume_type);
}
}
if (!validateVolumeCreate(openstackTenantId, null, requestedSize)) {
_log.info("The volume can not be created because of insufficient project quota.");
throw APIException.badRequests.insufficientQuotaForProject(project.getLabel(), "volume");
} else if (!validateVolumeCreate(openstackTenantId, vpool, requestedSize)) {
_log.info("The volume can not be created because of insufficient quota for virtual pool.");
throw APIException.badRequests.insufficientQuotaForVirtualPool(vpool.getLabel(), "virtual pool");
}
_log.debug("Create volume: vpool = {}", vpool.getLabel());
VirtualArray varray = getCinderHelper().getVarray(param.volume.availability_zone, getUserFromContext());
if ((snapshotId == null) && (sourceVolId == null) && (varray == null)) {
// otherwise availability_zone exception will be thrown
throw APIException.badRequests.parameterIsNotValid(param.volume.availability_zone);
}
// Validating consistency group
URI blockConsistencyGroupId = null;
BlockConsistencyGroup blockConsistencyGroup = null;
if (consistencygroup_id != null) {
    _log.info("Verifying for consistency group : " + consistencygroup_id);
    blockConsistencyGroup = (BlockConsistencyGroup) getCinderHelper().queryByTag(URI.create(consistencygroup_id), getUserFromContext(), BlockConsistencyGroup.class);
    if (getCinderHelper().verifyConsistencyGroupHasSnapshot(blockConsistencyGroup)) {
        _log.error("Bad Request : Consistency Group has Snapshot ");
        return CinderApiUtils.createErrorResponse(400, "Bad Request : Consistency Group has Snapshot ");
    }
    blockConsistencyGroupId = blockConsistencyGroup.getId();
    if (blockConsistencyGroup.getTag() != null && consistencygroup_id.equals(blockConsistencyGroupId.toString().split(":")[3])) {
        for (ScopedLabel tag : blockConsistencyGroup.getTag()) {
            if (tag.getScope().equals("volume_types")) {
                if (tag.getLabel().equals(volume_type)) {
                    hasConsistencyGroup = true;
                } else {
                    return CinderApiUtils.createErrorResponse(404, "Invalid volume: No consistency group exist for volume : " + param.volume.display_name);
                }
            }
        }
    } else {
        return CinderApiUtils.createErrorResponse(404, "Invalid Consistency Group Id : No Such Consistency group exists");
    }
}
BlockSnapshot snapshot = null;
URI snapUri = null;
if (snapshotId != null) {
snapshot = (BlockSnapshot) getCinderHelper().queryByTag(URI.create(snapshotId), getUserFromContext(), BlockSnapshot.class);
if (snapshot == null) {
_log.error("Invalid snapshot id ={} ", snapshotId);
return CinderApiUtils.createErrorResponse(404, "Not Found : Invalid snapshot id" + snapshotId);
} else {
snapUri = snapshot.getId();
URI varrayUri = snapshot.getVirtualArray();
if (varray == null) {
varray = _dbClient.queryObject(VirtualArray.class, varrayUri);
}
}
}
if (varray != null)
_log.info("Create volume: varray = {}", varray.getLabel());
String name = null;
String description = null;
_log.info("is isV1Call {}", isV1Call);
_log.info("name = {}, description = {}", name, description);
if (isV1Call != null) {
name = param.volume.display_name;
description = param.volume.display_description;
} else {
name = param.volume.name;
description = param.volume.description;
}
if (name == null) {
name = "volume-" + RandomStringUtils.random(10);
}
_log.info("param.volume.name = {}, param.volume.display_name = {}", param.volume.name, param.volume.display_name);
_log.info("param.volume.description = {}, param.volume.display_description = {}", param.volume.description, param.volume.display_description);
if (name == null || (name.length() <= 2))
throw APIException.badRequests.parameterIsNotValid(name);
URI projectUri = project.getId();
checkForDuplicateName(name, Volume.class, projectUri, "project", _dbClient);
// Step 2: Check if the user has rights for volume create
verifyUserIsAuthorizedForRequest(project, vpool, varray);
// Step 3: Check capacity Quotas
_log.debug(" volume name = {}, size = {} GB", name, param.volume.size);
int volumeCount = 1;
VolumeCreate volumeCreate = new VolumeCreate(name, Long.toString(requestedSize), volumeCount, vpool.getId(), varray.getId(), project.getId());
BlockServiceApi api = getBlockServiceImpl(vpool, _dbClient);
CapacityUtils.validateQuotasForProvisioning(_dbClient, vpool, project, tenant, requestedSize, "volume");
// Step 4: Call out placementManager to get the recommendation for placement.
VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, volumeCount);
capabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, requestedSize);
// Create a unique task id if one is not passed in the request.
String task = UUID.randomUUID().toString();
TaskList tasklist = null;
BlockFullCopyManager blkFullCpManager = new BlockFullCopyManager(_dbClient, _permissionsHelper, _auditMgr, _coordinator, _placementManager, sc, uriInfo, _request, null);
if (hasConsistencyGroup && blockConsistencyGroupId != null) {
try {
checkForConsistencyGroup(vpool, blockConsistencyGroup, project, api, varray, capabilities, blkFullCpManager);
volumeCreate.setConsistencyGroup(blockConsistencyGroupId);
} catch (APIException exp) {
return CinderApiUtils.createErrorResponse(400, "Bad Request : can't create volume for the consistency group : " + blockConsistencyGroupId);
}
}
if (sourceVolId != null) {
_log.debug("Creating New Volume from Volume : Source volume ID ={}", sourceVolId);
if (sourceVolume != null) {
Volume vol = findVolume(sourceVolId, openstackTenantId);
if (vol == null) {
_log.debug("Creating Clone Volume failed : Invalid source volume id ");
return CinderApiUtils.createErrorResponse(404, "Not Found : Invalid source volume id" + sourceVolId);
}
tasklist = volumeClone(name, project, sourceVolId, varray, volumeCount, sourceVolume, blkFullCpManager);
} else {
_log.debug("Creating Clone Volume failed : Null Source volume ");
return CinderApiUtils.createErrorResponse(404, "Not Found : Null source volume ");
}
} else if (snapshotId != null) {
_log.debug("Creating New Volume from Snapshot ID ={}", snapshotId);
tasklist = volumeFromSnapshot(name, project, snapshotId, varray, param, volumeCount, blkFullCpManager, snapUri, snapshot);
} else if ((snapshotId == null) && (sourceVolId == null)) {
_log.debug("Creating New Volume where snapshotId and sourceVolId are null");
tasklist = newVolume(volumeCreate, project, api, capabilities, varray, task, vpool, param, volumeCount, requestedSize, name);
}
if (imageId != null) {
_log.debug("Creating New Volume from imageid ={}", imageId);
// will be implemented
tasklist = volumeFromImage(name, project, varray, param, volumeCount, blkFullCpManager, imageId);
}
if (!(tasklist.getTaskList().isEmpty())) {
    for (TaskResourceRep rep : tasklist.getTaskList()) {
        URI volumeUri = rep.getResource().getId();
        Volume vol = _dbClient.queryObject(Volume.class, volumeUri);
        if (vol != null) {
            StringMap extensions = vol.getExtensions();
            if (extensions == null)
                extensions = new StringMap();
            extensions.put("display_description", (description == null) ? "" : description);
            vol.setExtensions(extensions);
            ScopedLabelSet tagSet = new ScopedLabelSet();
            vol.setTag(tagSet);
            String[] splits = volumeUri.toString().split(":");
            String tagName = splits[3];
            if (tagName == null || tagName.isEmpty() || tagName.length() < 2) {
                throw APIException.badRequests.parameterTooShortOrEmpty("Tag", 2);
            }
            URI tenantOwner = vol.getTenant().getURI();
            ScopedLabel tagLabel = new ScopedLabel(tenantOwner.toString(), tagName);
            tagSet.add(tagLabel);
            _dbClient.updateAndReindexObject(vol);
            if (isV1Call != null) {
                _log.debug("Inside V1 call");
                return CinderApiUtils.getCinderResponse(getVolumeDetail(vol, isV1Call, openstackTenantId), header, true, CinderConstants.STATUS_OK);
            } else {
                return CinderApiUtils.getCinderResponse(getVolumeDetail(vol, isV1Call, openstackTenantId), header, true, CinderConstants.STATUS_ACCEPT);
            }
        } else {
            throw APIException.badRequests.parameterIsNullOrEmpty("Volume");
        }
    }
}
return CinderApiUtils.getCinderResponse(new VolumeDetail(), header, true, CinderConstants.STATUS_ACCEPT);
}
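Both the consistency-group check and the tag handling above index into split(":")[3]. Assuming the usual ViPR URN layout of urn:storageos:<Type>:<UUID>:<vdc> (an assumption inferred from the code, not stated in it), that index is the raw UUID portion, as a standalone illustration shows:
// Standalone illustration, assuming the typical ViPR URN form urn:storageos:<Type>:<UUID>:<vdc>.
String volumeUrn = "urn:storageos:Volume:0f1e2d3c-4b5a-6978-8897-a6b5c4d3e2f1:vdc1";
String[] splits = volumeUrn.split(":");
String tagName = splits[3];
// tagName == "0f1e2d3c-4b5a-6978-8897-a6b5c4d3e2f1" -- the UUID portion used as the tag label above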