Example usage of com.emc.storageos.svcs.errorhandling.resources.APIException in the CoprHD project coprhd-controller:
class RPBlockServiceApiImpl, method prepareRecommendedVolumes.
/**
 * Prepare Recommended Volumes for Protected scenarios only.
 *
 * This method is responsible for acting the same as the unprotected "prepareRecommendedVolumes" call,
 * however it needs to create multiple volumes per single volume request in order to generate protection.
 *
 * The most typical scenario is that, for any one volume requested in a CRR configuration, we create:
 * 1. One Source Volume
 * 2. One Source Journal Volume (minimum 10GB, otherwise 2.5X source size)
 * 3. One Target Volume on protection varray
 * 4. One Target Journal Volume on protection varray
 *
 * In a CLR configuration, there are additional volumes created for the Local Target and Local Target Journal.
 *
 * This method will assemble a ProtectionSet object in Cassandra that will describe the Protection that
 * will be created on the Protection System.
 *
 * When other protection mechanisms come on board, the RP-ness of this method will need to be pulled out.
 *
 * @param param volume create request
 * @param task task from request or generated
 * @param taskList task list
 * @param project project from request
 * @param originalVarray varray from request (pre src/HA swap)
 * @param originalVpool vpool from request (pre src/HA swap)
 * @param numberOfVolumesInRequest volume count from the request
 * @param recommendations list of resulting recommendations from placement
 * @param volumeLabel base label used to derive the names of all prepared volumes
 * @param capabilities Capabilities object; also carries the consistency group reference
 * @param descriptors List of descriptors to be populated
 * @param volumeURIs List to hold volumes that have been prepared
 * @throws APIException if journal or source/target volume preparation fails
 */
private void prepareRecommendedVolumes(VolumeCreate param, String task, TaskList taskList, Project project, VirtualArray originalVarray, VirtualPool originalVpool, Integer numberOfVolumesInRequest, List<Recommendation> recommendations, String volumeLabel, VirtualPoolCapabilityValuesWrapper capabilities, List<VolumeDescriptor> descriptors, List<URI> volumeURIs) throws APIException {
    boolean isChangeVpool = false;
    boolean isChangeVpoolForProtectedVolume = false;
    boolean isSrcAndHaSwapped = VirtualPool.isRPVPlexProtectHASide(originalVpool);
    boolean metroPointEnabled = VirtualPool.vPoolSpecifiesMetroPoint(originalVpool);
    // This copy of the capabilities object is meant to be used by all volume prepares that require changing data,
    // which in our case is TARGET and JOURNALS. SOURCE will always use the main capabilities object.
    VirtualPoolCapabilityValuesWrapper copyOfCapabilities = new VirtualPoolCapabilityValuesWrapper(capabilities);
    // Set the volume name from the param
    String volumeName = volumeLabel;
    // Need to check if we should swap src and ha, call the block scheduler code to
    // find out. Nothing will be changed for MetroPoint.
    VirtualArray haVarray = null;
    VirtualPool haVpool = null;
    SwapContainer container = this.getBlockScheduler().new SwapContainer();
    container.setSrcVarray(originalVarray);
    container.setSrcVpool(originalVpool);
    container.setHaVarray(haVarray);
    container.setHaVpool(haVpool);
    container = RecoverPointScheduler.initializeSwapContainer(container, _dbClient);
    // Use the new references post swap
    VirtualArray varray = container.getSrcVarray();
    VirtualPool vpool = container.getSrcVpool();
    // Save a reference to the CG, we'll need this later. The CG URI travels in the capabilities wrapper.
    BlockConsistencyGroup consistencyGroup = capabilities.getBlockConsistencyGroup() == null ? null : _dbClient.queryObject(BlockConsistencyGroup.class, capabilities.getBlockConsistencyGroup());
    // Total volumes to be created across all recommendations; used to generate unique labels.
    int totalVolumeCount = 0;
    // Create an entire Protection object for each recommendation result.
    Iterator<Recommendation> recommendationsIter = recommendations.iterator();
    while (recommendationsIter.hasNext()) {
        RPProtectionRecommendation rpProtectionRec = (RPProtectionRecommendation) recommendationsIter.next();
        URI protectionSystemURI = rpProtectionRec.getProtectionDevice();
        URI changeVpoolVolumeURI = rpProtectionRec.getVpoolChangeVolume();
        Volume changeVpoolVolume = (changeVpoolVolumeURI == null ? null : _dbClient.queryObject(Volume.class, changeVpoolVolumeURI));
        isChangeVpool = (changeVpoolVolumeURI != null);
        isChangeVpoolForProtectedVolume = rpProtectionRec.isVpoolChangeProtectionAlreadyExists();
        boolean addJournalForStandbySourceCopy = capabilities.getAddJournalCapacity() && (rpProtectionRec.getStandbyJournalRecommendation() != null);
        String newVolumeLabel = volumeName;
        // Find the Source RP Copy Name
        String sourceCopyName = retrieveRpCopyName(originalVpool, varray, consistencyGroup, true);
        String standbySourceCopyName = "";
        if (addJournalForStandbySourceCopy) {
            // Find the Source Standby RP Copy Name - for add journal operation
            standbySourceCopyName = retrieveRpCopyName(originalVpool, varray, consistencyGroup, true);
        }
        if (metroPointEnabled) {
            // Find the Source Standby RP Copy Name - for MetroPoint (resolved against the HA varray)
            haVarray = _dbClient.queryObject(VirtualArray.class, VPlexUtil.getHAVarray(originalVpool));
            standbySourceCopyName = retrieveRpCopyName(originalVpool, haVarray, consistencyGroup, true);
        }
        // Buffer that accumulates a human-readable summary of every prepared volume; logged once at the end.
        StringBuffer volumeInfoBuffer = new StringBuffer();
        volumeInfoBuffer.append(String.format(NEW_LINE));
        // Prepare the Journals first
        try {
            prepareRpJournals(rpProtectionRec, project, consistencyGroup, vpool, originalVpool, param, numberOfVolumesInRequest, newVolumeLabel, isChangeVpoolForProtectedVolume, copyOfCapabilities, protectionSystemURI, taskList, task, descriptors, volumeURIs, volumeInfoBuffer, sourceCopyName, standbySourceCopyName);
        } catch (Exception e) {
            _log.error("Error trying to prepare RP Journal volumes", e);
            throw APIException.badRequests.rpBlockApiImplPrepareVolumeException(newVolumeLabel);
        }
        // Prepare the source and targets
        if (rpProtectionRec.getSourceRecommendations() != null) {
            for (RPRecommendation sourceRec : rpProtectionRec.getSourceRecommendations()) {
                // Get a reference to all existing VPLEX Source volumes (if any)
                List<Volume> allSourceVolumesInCG = BlockConsistencyGroupUtils.getActiveVplexVolumesInCG(consistencyGroup, _dbClient, Volume.PersonalityTypes.SOURCE);
                // Validate the MetroPoint type only for the first MP volume of a new CG.
                if (metroPointEnabled && allSourceVolumesInCG.isEmpty()) {
                    validateMetroPointType(sourceRec.getMetroPointType());
                }
                // Get the number of volumes needed to be created for this recommendation.
                int volumeCountInRec = sourceRec.getResourceCount();
                // All source volumes will share the same secondary journal.
                if (isChangeVpoolForProtectedVolume) {
                    _log.info(String.format("Change Virtual Pool Protected: %d existing source volume(s) in CG [%s](%s) are affected.", allSourceVolumesInCG.size(), consistencyGroup.getLabel(), consistencyGroup.getId()));
                    // Force the count to the number of existing source volumes in the CG.
                    volumeCountInRec = allSourceVolumesInCG.size();
                }
                // Grab a handle of the haRec, it could be null which is Ok (non-HA configurations).
                RPRecommendation haRec = sourceRec.getHaRecommendation();
                for (int volumeCount = 0; volumeCount < volumeCountInRec; volumeCount++) {
                    // Let's not get into multiple of multiples, this class will handle multi volume creates.
                    // So force the incoming VolumeCreate param to be set to 1 always from here on.
                    sourceRec.setResourceCount(1);
                    if (haRec != null) {
                        haRec.setResourceCount(1);
                    }
                    newVolumeLabel = generateDefaultVolumeLabel(volumeName, totalVolumeCount, numberOfVolumesInRequest);
                    // Grab the existing volume and task object from the incoming task list
                    Volume preCreatedVolume = StorageScheduler.getPrecreatedVolume(_dbClient, taskList, newVolumeLabel);
                    // Assemble a Replication Set; A Collection of volumes. One production, and any number of
                    // targets.
                    String rsetName = "RSet-" + newVolumeLabel;
                    // Increment total volume count
                    totalVolumeCount++;
                    // This name has to remain unique, especially when the number of volumes requested to be created
                    // is more than 1.
                    param.setName(newVolumeLabel);
                    Volume sourceVolume = null;
                    // /////// SOURCE ///////////
                    if (!isChangeVpoolForProtectedVolume) {
                        if (isChangeVpool) {
                            _log.info(String.format("Change Vpool, use existing Source Volume [%s].", changeVpoolVolume.getLabel()));
                        } else {
                            _log.info("Create RP Source Volume...");
                        }
                        // Create the source
                        sourceVolume = createRecoverPointVolume(sourceRec, newVolumeLabel, project, capabilities, consistencyGroup, param, protectionSystemURI, Volume.PersonalityTypes.SOURCE, rsetName, preCreatedVolume, null, taskList, task, sourceCopyName, descriptors, changeVpoolVolume, isChangeVpool, isSrcAndHaSwapped, true);
                    } else {
                        if (metroPointEnabled) {
                            _log.info("Upgrade to MetroPoint operation...");
                            // Update every existing source volume in the CG to reference the newly created stand-by journal.
                            for (Volume sourceVol : allSourceVolumesInCG) {
                                _log.info(String.format("Update the source volume [%s](%s) with new standby journal.", sourceVol.getLabel(), sourceVol.getId()));
                                // All RP+VPLEX Metro volumes in this CG need to have their backing volume
                                // references updated with the internal site names for exports.
                                setInternalSitesForSourceBackingVolumes(sourceRec, haRec, sourceVol, true, false, originalVpool.getHaVarrayConnectedToRp(), sourceCopyName, standbySourceCopyName);
                                // We need to have all the existing RP+VPLEX Metro volumes from the CG
                                // added to the volumeURI list so we can properly export the standby
                                // leg to RP for each volume.
                                volumeURIs.add(sourceVol.getId());
                            }
                        } else {
                            // NOTE: Upgrade to MetroPoint is (currently) the only supported Change Virtual Pool Protected
                            // operation, so if we have a null standby journal we're in real trouble.
                            _log.error("Error trying to upgrade to MetroPoint. Standby journal is null.");
                            throw APIException.badRequests.rpBlockApiImplPrepareVolumeException(newVolumeLabel);
                        }
                        // No new source/target volumes are created for this operation; nothing more to do
                        // past this point.
                        break;
                    }
                    // sourceVolume is non-null here: the change-vpool-for-protected path above either
                    // broke out of the loop or threw before reaching this line.
                    volumeURIs.add(sourceVolume.getId());
                    // NOTE: This is only needed for MetroPoint and Distributed RP+VPLEX(HA as RP source),
                    // nothing will happen for regular RP volumes.
                    //
                    // Source volumes need to have their backing volumes set with the correct internal
                    // site name. The reason for this is so we know later on where to export the volumes to.
                    //
                    // This is very evident with MetroPoint as we need to export BOTH sides of the VPLEX Distributed
                    // Volume.
                    //
                    // This is less evident with Distributed RP+VPLEX that has "HA as RP source" set.
                    // In this case we need to set it on the HA volume as that is the side to export (not the source
                    // side).
                    // To do this we need to pass in a hint...
                    // We need the (unswapped) original vpool and we then check the getHaVarrayConnectedToRp() value
                    // which tells us
                    // which side(varray) to export.
                    // This value will only be used if isSrcAndHaSwapped == true.
                    setInternalSitesForSourceBackingVolumes(sourceRec, haRec, sourceVolume, metroPointEnabled, isSrcAndHaSwapped, originalVpool.getHaVarrayConnectedToRp(), sourceCopyName, standbySourceCopyName);
                    // /////// TARGET(S) ///////////
                    List<URI> protectionTargets = new ArrayList<URI>();
                    for (RPRecommendation targetRec : sourceRec.getTargetRecommendations()) {
                        // Keep track of the targets created
                        protectionTargets.add(targetRec.getVirtualArray());
                        // Grab the target's varray
                        VirtualArray targetVirtualArray = _dbClient.queryObject(VirtualArray.class, targetRec.getVirtualArray());
                        _log.info(String.format("Create Target (%s)...", targetVirtualArray.getLabel()));
                        // During a change-vpool operation a target may already exist, in which case there is no need
                        // to provision this target.
                        if (isChangeVpoolForProtectedVolume) {
                            Volume alreadyProvisionedTarget = RPHelper.findAlreadyProvisionedTargetVolume(changeVpoolVolume, targetRec.getVirtualArray(), _dbClient);
                            if (alreadyProvisionedTarget != null) {
                                _log.info(String.format("Existing target volume [%s] found for varray [%s].", alreadyProvisionedTarget.getLabel(), targetVirtualArray.getLabel()));
                                // No need to go further, continue on to the next target varray
                                continue;
                            }
                        }
                        // Generate target volume name
                        String targetVolumeName = new StringBuilder(newVolumeLabel).append(VOLUME_TYPE_TARGET + targetVirtualArray.getLabel()).toString();
                        // Create the target
                        Volume targetVolume = createRecoverPointVolume(targetRec, targetVolumeName, project, copyOfCapabilities, consistencyGroup, param, protectionSystemURI, Volume.PersonalityTypes.TARGET, rsetName, null, sourceVolume, taskList, task, targetRec.getRpCopyName(), descriptors, null, false, false, false);
                        volumeInfoBuffer.append(logVolumeInfo(targetVolume));
                        volumeURIs.add(targetVolume.getId());
                    }
                    // /////// METROPOINT LOCAL TARGET(S) ///////////
                    // NOTE(review): haRec is assumed non-null whenever metroPointEnabled is true;
                    // confirm that placement always supplies an HA recommendation for MetroPoint.
                    if (metroPointEnabled && haRec.getTargetRecommendations() != null && !haRec.getTargetRecommendations().isEmpty()) {
                        // If this is a MetroPoint CG, then we need to create targets for the second (stand-by) leg.
                        for (RPRecommendation standbyTargetRec : haRec.getTargetRecommendations()) {
                            // Grab the MP target's varray
                            VirtualArray standyTargetVirtualArray = _dbClient.queryObject(VirtualArray.class, standbyTargetRec.getVirtualArray());
                            _log.info(String.format("Create Standby Target (%s)..", standyTargetVirtualArray.getLabel()));
                            // Skip any varray already provisioned as a target by the active
                            // source recommendation.
                            if (protectionTargets.contains(standbyTargetRec.getVirtualArray())) {
                                continue;
                            }
                            // During change-vpool an existing target volume may already serve as the
                            // standby.
                            if (isChangeVpoolForProtectedVolume) {
                                Volume alreadyProvisionedTarget = RPHelper.findAlreadyProvisionedTargetVolume(changeVpoolVolume, standyTargetVirtualArray.getId(), _dbClient);
                                if (alreadyProvisionedTarget != null) {
                                    _log.info(String.format("Existing target volume [%s] found for varray [%s].", alreadyProvisionedTarget.getLabel(), standyTargetVirtualArray.getLabel()));
                                    // No need to go further, continue on to the next target varray
                                    continue;
                                }
                            }
                            // Generate standby target label
                            String standbyTargetVolumeName = new StringBuilder(newVolumeLabel).append(VOLUME_TYPE_TARGET + standyTargetVirtualArray.getLabel()).toString();
                            // Create the standby target
                            Volume standbyTargetVolume = createRecoverPointVolume(standbyTargetRec, standbyTargetVolumeName, project, copyOfCapabilities, consistencyGroup, param, protectionSystemURI, Volume.PersonalityTypes.TARGET, rsetName, null, sourceVolume, taskList, task, standbyTargetRec.getRpCopyName(), descriptors, null, false, false, false);
                            volumeInfoBuffer.append(logVolumeInfo(standbyTargetVolume));
                            volumeURIs.add(standbyTargetVolume.getId());
                        }
                    }
                    // Hold off on logging the source volume until we're done creating the targets
                    volumeInfoBuffer.append(logVolumeInfo(sourceVolume));
                }
            }
            volumeInfoBuffer.append(String.format(NEW_LINE));
            _log.info(volumeInfoBuffer.toString());
        }
    }
}
Example usage of com.emc.storageos.svcs.errorhandling.resources.APIException in the CoprHD project coprhd-controller:
class BlockConsistencyGroupService, method deactivateConsistencyGroupSnapshot.
/**
 * Deactivate the specified Consistency Group Snapshot.
 *
 * If the snapshot is a linked target of a snapshot session, it is unlinked and
 * deleted via the session path instead. All snapshots belonging to the same
 * replication group are deactivated together.
 *
 * @prereq none
 *
 * @param consistencyGroupId
 *            - Consistency group URI
 * @param snapshotId
 *            - Consistency group snapshot URI
 *
 * @brief Deactivate consistency group snapshot
 * @return TaskList with one task per snapshot in the replication group plus a task for the CG
 */
@POST
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/{id}/protection/snapshots/{sid}/deactivate")
@CheckPermission(roles = { Role.TENANT_ADMIN }, acls = { ACL.ANY })
public TaskList deactivateConsistencyGroupSnapshot(@PathParam("id") final URI consistencyGroupId, @PathParam("sid") final URI snapshotId) {
    final BlockConsistencyGroup consistencyGroup = (BlockConsistencyGroup) queryResource(consistencyGroupId);
    // Snapshots of RecoverPoint consistency groups is not supported.
    if (isIdEmbeddedInURL(consistencyGroupId) && consistencyGroup.checkForType(Types.RP)) {
        throw APIException.badRequests.snapshotsNotSupportedForRPCGs();
    }
    // Snapshot operations require a backend CG to exist.
    if (BlockConsistencyGroupUtils.getLocalSystemsInCG(consistencyGroup, _dbClient).isEmpty()) {
        _log.error("{} Group Snapshot operations not supported when there is no backend CG", consistencyGroup.getId());
        throw APIException.badRequests.cannotCreateSnapshotOfCG();
    }
    final BlockSnapshot snapshot = (BlockSnapshot) queryResource(snapshotId);
    verifySnapshotIsForConsistencyGroup(snapshot, consistencyGroup);
    // We can ignore dependencies on BlockSnapshotSession. In this case
    // the BlockSnapshot instance is a linked target for a BlockSnapshotSession
    // and we will unlink the snapshot from the session and delete it.
    List<Class<? extends DataObject>> excludeTypes = new ArrayList<Class<? extends DataObject>>();
    excludeTypes.add(BlockSnapshotSession.class);
    ArgValidator.checkReference(BlockSnapshot.class, snapshotId, checkForDelete(snapshot, excludeTypes));
    // Snapshot session linked targets must be unlinked instead.
    BlockSnapshotSession session = BlockSnapshotSessionUtils.getLinkedTargetSnapshotSession(snapshot, _dbClient);
    if (session != null) {
        return deactivateAndUnlinkTargetVolumesForSession(session, snapshot);
    }
    // Generate task id
    final String task = UUID.randomUUID().toString();
    TaskList response = new TaskList();
    // Not an error if the snapshot we try to delete is already deleted
    if (snapshot.getInactive()) {
        Operation op = new Operation();
        op.ready("The consistency group snapshot has already been deactivated");
        op.setResourceType(ResourceOperationTypeEnum.DELETE_CONSISTENCY_GROUP_SNAPSHOT);
        _dbClient.createTaskOpStatus(BlockSnapshot.class, snapshot.getId(), task, op);
        response.getTaskList().add(toTask(snapshot, task, op));
        return response;
    }
    // All snapshots that are part of the same replication group are deleted together.
    // (Single declaration: the previous dead `new ArrayList<>()` allocation was removed.)
    List<BlockSnapshot> snapshots = ControllerUtils.getSnapshotsPartOfReplicationGroup(snapshot, _dbClient);
    // Get the snapshot parent volume.
    Volume parentVolume = _permissionsHelper.getObjectById(snapshot.getParent(), Volume.class);
    // Check that there are no pending tasks for these snapshots.
    checkForPendingTasks(Arrays.asList(parentVolume.getTenant().getURI()), snapshots);
    // Create a task per snapshot in the group so callers can track each one.
    for (BlockSnapshot snap : snapshots) {
        Operation snapOp = _dbClient.createTaskOpStatus(BlockSnapshot.class, snap.getId(), task, ResourceOperationTypeEnum.DEACTIVATE_VOLUME_SNAPSHOT);
        response.getTaskList().add(toTask(snap, task, snapOp));
    }
    addConsistencyGroupTask(consistencyGroup, response, task, ResourceOperationTypeEnum.DEACTIVATE_CONSISTENCY_GROUP_SNAPSHOT);
    try {
        BlockServiceApi blockServiceApiImpl = BlockService.getBlockServiceImpl(parentVolume, _dbClient);
        blockServiceApiImpl.deleteSnapshot(snapshot, snapshots, task, VolumeDeleteTypeEnum.FULL.name());
    } catch (APIException | InternalException e) {
        // Known service exception: mark every pre-created task as failed, then rethrow as-is.
        String errorMsg = String.format("Exception attempting to delete snapshot %s: %s", snapshot.getId(), e.getMessage());
        _log.error(errorMsg);
        for (TaskResourceRep taskResourceRep : response.getTaskList()) {
            taskResourceRep.setState(Operation.Status.error.name());
            taskResourceRep.setMessage(errorMsg);
            @SuppressWarnings({ "unchecked" }) Class<? extends DataObject> clazz = URIUtil.getModelClass(taskResourceRep.getResource().getId());
            _dbClient.error(clazz, taskResourceRep.getResource().getId(), task, e);
        }
        throw e;
    } catch (Exception e) {
        // Unexpected exception: wrap it in an APIException before marking tasks failed and rethrowing.
        String errorMsg = String.format("Exception attempting to delete snapshot %s: %s", snapshot.getId(), e.getMessage());
        _log.error(errorMsg);
        APIException apie = APIException.internalServerErrors.genericApisvcError(errorMsg, e);
        for (TaskResourceRep taskResourceRep : response.getTaskList()) {
            taskResourceRep.setState(Operation.Status.error.name());
            taskResourceRep.setMessage(apie.getMessage());
            @SuppressWarnings("unchecked") Class<? extends DataObject> clazz = URIUtil.getModelClass(taskResourceRep.getResource().getId());
            _dbClient.error(clazz, taskResourceRep.getResource().getId(), task, apie);
        }
        throw apie;
    }
    auditBlockConsistencyGroup(OperationTypeEnum.DELETE_CONSISTENCY_GROUP_SNAPSHOT, AuditLogManager.AUDITLOG_SUCCESS, AuditLogManager.AUDITOP_BEGIN, snapshot.getId().toString(), snapshot.getLabel());
    return response;
}
Example usage of com.emc.storageos.svcs.errorhandling.resources.APIException in the CoprHD project coprhd-controller:
class BlockConsistencyGroupService, method deleteConsistencyGroup.
/**
 * Deletes a consistency group
 *
 * Do not delete if snapshots exist for consistency group
 *
 * @prereq Dependent snapshot resources must be deleted
 *
 * @param id the URN of a ViPR Consistency group
 * @param type the delete type; "FULL" (default) also removes backend array CGs,
 *            "VIPR_ONLY" removes only the ViPR database entry
 *
 * @brief Delete consistency group
 * @return TaskResourceRep
 *
 * @throws InternalException
 */
@POST
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/{id}/deactivate")
@CheckPermission(roles = { Role.TENANT_ADMIN }, acls = { ACL.OWN, ACL.ALL })
public TaskResourceRep deleteConsistencyGroup(@PathParam("id") final URI id, @DefaultValue("FULL") @QueryParam("type") String type) throws InternalException {
    // Query for the given consistency group and verify it is valid.
    final BlockConsistencyGroup consistencyGroup = (BlockConsistencyGroup) queryResource(id);
    ArgValidator.checkReference(BlockConsistencyGroup.class, id, checkForDelete(consistencyGroup));
    // Create a unique task identifier.
    String task = UUID.randomUUID().toString();
    // If the CG was never created on an array, or this is a ViPR-only delete,
    // there is nothing to clean up on the backend; just mark it for deletion.
    if (deletingUncreatedConsistencyGroup(consistencyGroup) || VolumeDeleteTypeEnum.VIPR_ONLY.name().equals(type)) {
        markCGForDeletion(consistencyGroup);
        return finishDeactivateTask(consistencyGroup, task);
    }
    // Otherwise, we need to clean up the array consistency groups.
    TaskResourceRep taskRep = null;
    try {
        List<StorageSystem> vplexSystems = BlockConsistencyGroupUtils.getVPlexStorageSystems(consistencyGroup, _dbClient);
        if (!vplexSystems.isEmpty()) {
            // If there is a VPLEX system, then we simply call the VPLEX controller which
            // will delete all VPLEX CGS on all VPLEX systems, and also all local CGs on
            // all local systems.
            BlockServiceApi blockServiceApi = getBlockServiceImpl(DiscoveredDataObject.Type.vplex.name());
            taskRep = blockServiceApi.deleteConsistencyGroup(vplexSystems.get(0), consistencyGroup, task);
        } else {
            // Otherwise, we call the block controller to delete the local CGs on all local systems.
            List<URI> localSystemURIs = BlockConsistencyGroupUtils.getLocalSystems(consistencyGroup, _dbClient);
            if (!localSystemURIs.isEmpty()) {
                boolean foundSystem = false;
                for (URI localSystemURI : localSystemURIs) {
                    StorageSystem localSystem = _dbClient.queryObject(StorageSystem.class, localSystemURI);
                    if (localSystem != null) {
                        foundSystem = true;
                        BlockServiceApi blockServiceApi = getBlockServiceImpl(BLOCKSERVICEAPIIMPL_GROUP);
                        taskRep = blockServiceApi.deleteConsistencyGroup(localSystem, consistencyGroup, task);
                        if (Task.Status.error.name().equals(taskRep.getState())) {
                            break;
                        }
                    } else {
                        _log.warn("Local system {} for consistency group {} does not exist", localSystemURI, consistencyGroup.getLabel());
                    }
                }
                // Check to make sure we found at least one of these local systems.
                if (!foundSystem) {
                    // For some reason we have a CG with local systems, but none of them
                    // are in the database. In this case, we will log a warning and mark
                    // it for deletion.
                    _log.warn("Deleting created consistency group {} where none of the local systems for the group exist", consistencyGroup.getLabel());
                    markCGForDeletion(consistencyGroup);
                    return finishDeactivateTask(consistencyGroup, task);
                }
            } else {
                // For some reason the CG has no VPLEX or local systems but is
                // marked as being active and created. In this case, we will log
                // a warning and mark it for deletion.
                _log.info("Deleting created consistency group {} with no local or VPLEX systems", consistencyGroup.getLabel());
                markCGForDeletion(consistencyGroup);
                return finishDeactivateTask(consistencyGroup, task);
            }
        }
    } catch (APIException | InternalException e) {
        String errorMsg = String.format("Exception attempting to delete consistency group %s: %s", consistencyGroup.getLabel(), e.getMessage());
        _log.error(errorMsg);
        // Bug fix: taskRep is null when the failure happens before any controller task was
        // created; previously this dereferenced taskRep and threw an NPE. Propagate instead.
        if (taskRep == null) {
            throw e;
        }
        taskRep.setState(Operation.Status.error.name());
        taskRep.setMessage(errorMsg);
        _dbClient.error(BlockConsistencyGroup.class, taskRep.getResource().getId(), task, e);
    } catch (Exception e) {
        String errorMsg = String.format("Exception attempting to delete consistency group %s: %s", consistencyGroup.getLabel(), e.getMessage());
        _log.error(errorMsg);
        APIException apie = APIException.internalServerErrors.genericApisvcError(errorMsg, e);
        // Same null guard as above: no task exists to mark failed, so rethrow the wrapped error.
        if (taskRep == null) {
            throw apie;
        }
        taskRep.setState(Operation.Status.error.name());
        taskRep.setMessage(apie.getMessage());
        _dbClient.error(BlockConsistencyGroup.class, taskRep.getResource().getId(), task, apie);
    }
    // Only mark the CG for deletion when the request was successful.
    if (taskRep != null && Task.Status.ready.name().equals(taskRep.getState())) {
        markCGForDeletion(consistencyGroup);
    }
    return taskRep;
}
Example usage of com.emc.storageos.svcs.errorhandling.resources.APIException in the CoprHD project coprhd-controller:
class BlockService, method changeVolumeVirtualPool.
/**
 * Allows the caller to change the virtual pool for the volume identified in
 * the request. Currently, the only virtual pool changes that are supported
 * are as follows:
 *
 * Change the virtual pool for a VPLEX virtual volume. This virtual pool
 * change would allow the caller to change the types of drives, for example,
 * used for the backend volume(s) that are used by the virtual volume.
 *
 * Change the virtual pool for a VPLEX virtual volume, such that a local
 * VPLEX virtual volume becomes a distributed VPLEX virtual volume.
 *
 * Change the virtual pool of a VMAX or VNX Block volume to make the volume
 * a local or distributed VPLEX virtual volume. Essentially, the volume
 * becomes the backend volume for a VPLEX virtual volume. Similar to
 * creating a virtual volume, but instead of creating a new backend volume,
 * using the volume identified in the request. The VMAX or VNX volume cannot
 * currently be exported for this change.
 *
 * Change the virtual pool of a VMAX or VNX Block volume to make the volume
 * a RecoverPoint protected volume. The volume must be able to stay put, and
 * ViPR will build a protection around it.
 *
 * Change the virtual pool of a VMAX or VNX Block volume to allow native
 * continuous copies to be created for it.
 *
 * Change the virtual pool of a volume to increase the export path parameter max_paths.
 * The number of paths will be upgraded if possible for all Export Groups / Export Masks
 * containing this volume. If the volume is not currently exported, max_paths can be
 * decreased or paths_per_initiator can be changed. Note that changing max_paths does
 * not have any effect on the export of BlockSnapshots that were created from this volume.
 *
 * Change the virtual pool of a VMAX and VNX volume to allow change of Auto-tiering policy
 * associated with it.
 * <p>
 * Since this method has been deprecated use POST /block/volumes/vpool-change
 *
 * @brief Change the virtual pool for a volume.
 *
 * @prereq none
 *
 * @param id
 *            the URN of a ViPR volume.
 * @param param
 *            The parameter specifying the new virtual pool.
 * @return A TaskResourceRep representing the virtual pool change for the
 *         volume.
 * @throws InternalException,
 *             APIException
 */
@PUT
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/{id}/vpool")
@CheckPermission(roles = { Role.TENANT_ADMIN }, acls = { ACL.OWN, ACL.ALL })
@Deprecated
public TaskResourceRep changeVolumeVirtualPool(@PathParam("id") URI id, VirtualPoolChangeParam param) throws InternalException, APIException {
    _log.info("Request to change VirtualPool for volume {}", id);
    // Load and validate the volume to be moved.
    ArgValidator.checkFieldUriType(id, Volume.class, "id");
    Volume targetVolume = queryVolumeResource(id);
    _log.info("Found volume");
    // VPLEX backend and RP Journal volumes are internal objects and may not be operated on directly.
    BlockServiceUtils.validateNotAnInternalBlockObject(targetVolume, false);
    // Ingested volumes do not support a vpool change.
    VolumeIngestionUtil.checkOperationSupportedOnIngestedVolume(targetVolume, ResourceOperationTypeEnum.CHANGE_BLOCK_VOLUME_VPOOL, _dbClient);
    // Resolve and validate the volume's project.
    URI owningProjectUri = targetVolume.getProject().getURI();
    Project owningProject = _permissionsHelper.getObjectById(owningProjectUri, Project.class);
    ArgValidator.checkEntity(owningProject, owningProjectUri, false);
    _log.info("Found volume project {}", owningProjectUri);
    // The caller must be authorized for that project.
    BlockServiceUtils.verifyUserIsAuthorizedForRequest(owningProject, getUserFromContext(), _permissionsHelper);
    _log.info("User is authorized for volume's project");
    // Resolve the requested virtual pool and confirm the project's tenant may use it.
    VirtualPool requestedVpool = getVirtualPoolForRequest(owningProject, param.getVirtualPool(), _dbClient, _permissionsHelper);
    _log.info("Found new VirtualPool {}", requestedVpool.getId());
    // Confirm this particular volume/vpool combination is a supported change.
    verifyVirtualPoolChangeSupportedForVolumeAndVirtualPool(targetVolume, requestedVpool);
    _log.info("VirtualPool change is supported for requested volume and VirtualPool");
    verifyAllVolumesInCGRequirement(Arrays.asList(targetVolume), requestedVpool);
    // The new vpool must have enough remaining quota for this volume's provisioned capacity.
    if (!CapacityUtils.validateVirtualPoolQuota(_dbClient, requestedVpool, targetVolume.getProvisionedCapacity())) {
        throw APIException.badRequests.insufficientQuotaForVirtualPool(requestedVpool.getLabel(), "volume");
    }
    // Create a unique task to track the change.
    String opId = UUID.randomUUID().toString();
    Operation status = _dbClient.createTaskOpStatus(Volume.class, id, opId, ResourceOperationTypeEnum.CHANGE_BLOCK_VOLUME_VPOOL);
    // Dispatch the vpool change to the appropriate block service implementation.
    try {
        BlockServiceApi serviceImpl = getBlockServiceImplForVirtualPoolChange(targetVolume, requestedVpool);
        _log.info("Got block service implementation for VirtualPool change request");
        serviceImpl.changeVolumeVirtualPool(Arrays.asList(targetVolume), requestedVpool, param, opId);
        _log.info("Executed VirtualPool change for volume.");
    } catch (InternalException | APIException e) {
        // Record the failure on the task before propagating the exception.
        String failureMsg = String.format("Volume VirtualPool change error: %s", e.getMessage());
        status = new Operation(Operation.Status.error.name(), failureMsg);
        _dbClient.updateTaskOpStatus(Volume.class, id, opId, status);
        throw e;
    }
    auditOp(OperationTypeEnum.CHANGE_VOLUME_VPOOL, true, AuditLogManager.AUDITOP_BEGIN, targetVolume.getLabel(), 1, targetVolume.getVirtualArray().toString(), targetVolume.getProject().toString());
    return toTask(targetVolume, opId, status);
}
Example usage of com.emc.storageos.svcs.errorhandling.resources.APIException in the CoprHD project coprhd-controller:
class BlockService, method createVolumeTaskList.
/**
 * Pre-creates placeholder Volume objects and the corresponding Task objects so the
 * caller of the API gets an immediate response while provisioning proceeds
 * asynchronously. On failure, any objects already created are marked failed/inactive
 * and the exception is rethrown.
 *
 * @param size
 *            size of the volume (human-readable, e.g. "1GB")
 * @param project
 *            project of the volume
 * @param varray
 *            virtual array of the volume
 * @param vpool
 *            virtual pool of the volume
 * @param label
 *            base label for the volumes
 * @param task
 *            task string
 * @param volumeCount
 *            number of volumes requested
 * @return a list of tasks associated with this request
 */
private TaskList createVolumeTaskList(String size, Project project, VirtualArray varray, VirtualPool vpool, String label, String task, Integer volumeCount) {
    TaskList pendingTasks = new TaskList();
    try {
        // Translate the human-readable size once; it is the same for every volume.
        long sizeInBytes = SizeUtil.translateSize(size);
        // One placeholder volume object plus one task object per requested volume.
        for (int index = 0; index < volumeCount; index++) {
            Volume placeholder = StorageScheduler.prepareEmptyVolume(_dbClient, sizeInBytes, project, varray, vpool, label, index, volumeCount);
            Operation status = _dbClient.createTaskOpStatus(Volume.class, placeholder.getId(), task, ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME);
            placeholder.getOpStatus().put(task, status);
            TaskResourceRep taskRep = toTask(placeholder, task, status);
            pendingTasks.getTaskList().add(taskRep);
            _log.info(String.format("Volume and Task Pre-creation Objects [Init]-- Source Volume: %s, Task: %s, Op: %s", placeholder.getId(), taskRep.getId(), task));
        }
    } catch (APIException ex) {
        // Mark the dummy objects inactive
        String errMsg = "Caught Exception while creating Volume and Task objects. Marking pre-created Objects inactive";
        _log.error(errMsg, ex);
        for (TaskResourceRep rep : pendingTasks.getTaskList()) {
            rep.setMessage(String.format("%s. %s", errMsg, ex.getMessage()));
            rep.setState(Operation.Status.error.name());
            URI placeholderUri = rep.getResource().getId();
            _dbClient.error(Volume.class, placeholderUri, task, ex);
            // Flip the placeholder volume to inactive so it no longer appears to users.
            Volume placeholder = _dbClient.queryObject(Volume.class, placeholderUri);
            placeholder.setInactive(true);
            _dbClient.updateObject(placeholder);
        }
        // Re-throw so the caller observes the original failure.
        throw ex;
    }
    return pendingTasks;
}
Aggregations