Use of com.emc.storageos.svcs.errorhandling.resources.APIException in the coprhd-controller project by CoprHD.
From the class VolumeIngestionUtil, method checkIngestionRequestValidForUnManagedVolumes.
/**
 * Validation steps:
 * 1. Validate each PreExistingVolume URI.
 * 2. Check that the PreExistingVolume has been fully discovered and is not
 *    already under Bourne management.
 * 3. Check whether the given vPool is present in the PreExistingVolume's
 *    supported vPool list.
 *
 * @param unManagedVolumes the UnManagedVolumes from the request to validate
 * @param vPool the VirtualPool to validate against
 * @param dbClient a reference to the database client
 * @throws IngestionException if any validation check fails for a volume
 */
public static void checkIngestionRequestValidForUnManagedVolumes(List<URI> unManagedVolumes,
        VirtualPool vPool, DbClient dbClient) throws IngestionException {
    for (URI unManagedVolumeUri : unManagedVolumes) {
        UnManagedVolume unManagedVolume = dbClient.queryObject(UnManagedVolume.class, unManagedVolumeUri);
        // Fails fast if the volume was not found or only partially discovered.
        checkUnmanagedVolumePartiallyDiscovered(unManagedVolume, unManagedVolumeUri);
        StringSetMap unManagedVolumeInformation = unManagedVolume.getVolumeInformation();
        try {
            // Check if UnManagedVolume is CG enabled and VPool is not CG enabled.
            if (checkUnManagedResourceAddedToConsistencyGroup(unManagedVolume)
                    && !vPool.getMultivolumeConsistency()) {
                _logger.error(String.format(
                        "The requested Virtual Pool %s does not have the Multi-Volume Consistency flag set, and unmanaged volume %s is part of a consistency group.",
                        vPool.getLabel(), unManagedVolume.getLabel()));
                throw APIException.internalServerErrors.unmanagedVolumeVpoolConsistencyGroupMismatch(
                        vPool.getLabel(), unManagedVolume.getLabel());
            }
            // Check if the UnManagedVolume is a snapshot & Vpool doesn't have snapshotCount defined.
            if (isSnapshot(unManagedVolume) && 0 == vPool.getMaxNativeSnapshots()) {
                throw APIException.internalServerErrors.noMaxSnapshotsDefinedInVirtualPool(
                        vPool.getLabel(), unManagedVolume.getLabel());
            }
            // A VPLEX virtual volume and a snapshot will not have an associated pool,
            // so the storage pool check only applies to other volume types.
            if (!isVplexVolume(unManagedVolume) && !isSnapshot(unManagedVolume)) {
                checkStoragePoolValidForUnManagedVolumeUri(unManagedVolumeInformation, dbClient, unManagedVolumeUri);
            }
            // VPLEX backend volumes are not matched against the vPool directly.
            if (!isVplexBackendVolume(unManagedVolume)) {
                checkVPoolValidForGivenUnManagedVolumeUris(unManagedVolumeInformation, unManagedVolume,
                        vPool.getId(), dbClient);
            }
        } catch (APIException ex) {
            // Surface any API validation failure as an IngestionException so the
            // caller sees a single, consistent exception type.
            _logger.error(ex.getLocalizedMessage());
            throw IngestionException.exceptions.validationException(ex.getLocalizedMessage());
        }
    }
}
Use of com.emc.storageos.svcs.errorhandling.resources.APIException in the coprhd-controller project by CoprHD.
From the class VolumeGroupService, method performVolumeGroupSnapshotSessionOperation.
/**
 * Wrapper of BlockConsistencyGroupService methods for snapshot session operations
 * (restore, delete, link targets, relink targets, unlink targets). Sessions are
 * dispatched one storage replication group at a time; a failure in one group is
 * recorded as a failed task and does not abort the remaining groups.
 *
 * @param volumeGroupId the URI of the VolumeGroup on which to operate
 * @param param the snapshot session operation request parameters
 * @param opType the type of snapshot session operation to perform
 * @return a TaskList containing the tasks for each replication-group operation
 */
private TaskList performVolumeGroupSnapshotSessionOperation(final URI volumeGroupId, final VolumeGroupSnapshotSessionOperationParam param, OperationTypeEnum opType) {
List<BlockSnapshotSession> snapSessions = getSnapshotSessionsGroupedBySnapSessionset(volumeGroupId, param);
// Check for pending tasks
VolumeGroup volumeGroup = _dbClient.queryObject(VolumeGroup.class, volumeGroupId);
// Restore uses a stricter pending-task check than the other operations.
if (opType == OperationTypeEnum.RESTORE_VOLUME_GROUP_SNAPSHOT_SESSION) {
checkForApplicationPendingTasks(volumeGroup, _dbClient, true);
} else {
checkForApplicationPendingTasks(volumeGroup, _dbClient, false);
}
auditOp(opType, true, AuditLogManager.AUDITOP_BEGIN, volumeGroupId.toString(), param.getSnapshotSessions());
TaskList taskList = new TaskList();
// Group the sessions by (storage system URI, replication group name) so that each
// cell maps to a single BlockConsistencyGroupService call.
Table<URI, String, BlockSnapshotSession> storageRgToSnapshot = ControllerUtils.getSnapshotSessionForStorageReplicationGroup(snapSessions, _dbClient);
for (Cell<URI, String, BlockSnapshotSession> cell : storageRgToSnapshot.cellSet()) {
BlockSnapshotSession session = cell.getValue();
log.info("{} for replication group {}", opType.getDescription(), cell.getColumnKey());
// Tracks the per-operation resource type so a failed task can be created on error.
ResourceOperationTypeEnum oprEnum = null;
try {
// should not be null
URI cgUri = session.getConsistencyGroup();
URI sessionUri = session.getId();
log.info("CG: {}, Session: {}", cgUri, session.getLabel());
// Delegate to the matching BlockConsistencyGroupService operation; link/relink/unlink
// downcast param to the operation-specific subtype to extract target IDs.
switch(opType) {
case RESTORE_VOLUME_GROUP_SNAPSHOT_SESSION:
oprEnum = ResourceOperationTypeEnum.RESTORE_SNAPSHOT_SESSION;
taskList.addTask(_blockConsistencyGroupService.restoreConsistencyGroupSnapshotSession(cgUri, sessionUri));
break;
case DELETE_VOLUME_GROUP_SNAPSHOT_SESSION:
oprEnum = ResourceOperationTypeEnum.DELETE_CONSISTENCY_GROUP_SNAPSHOT_SESSION;
taskList.getTaskList().addAll(_blockConsistencyGroupService.deactivateConsistencyGroupSnapshotSession(cgUri, sessionUri).getTaskList());
break;
case LINK_VOLUME_GROUP_SNAPSHOT_SESSION_TARGET:
oprEnum = ResourceOperationTypeEnum.LINK_SNAPSHOT_SESSION_TARGETS;
SnapshotSessionLinkTargetsParam linkParam = new SnapshotSessionLinkTargetsParam(((VolumeGroupSnapshotSessionLinkTargetsParam) param).getNewLinkedTargets());
taskList.getTaskList().addAll(_blockConsistencyGroupService.linkTargetVolumes(cgUri, sessionUri, linkParam).getTaskList());
break;
case RELINK_VOLUME_GROUP_SNAPSHOT_SESSION_TARGET:
oprEnum = ResourceOperationTypeEnum.RELINK_CONSISTENCY_GROUP_SNAPSHOT_SESSION_TARGETS;
SnapshotSessionRelinkTargetsParam relinkParam = new SnapshotSessionRelinkTargetsParam(getRelinkTargetIdsForSession((VolumeGroupSnapshotSessionRelinkTargetsParam) param, session, snapSessions.size()));
taskList.getTaskList().addAll(_blockConsistencyGroupService.relinkTargetVolumes(cgUri, sessionUri, relinkParam).getTaskList());
break;
case UNLINK_VOLUME_GROUP_SNAPSHOT_SESSION_TARGET:
oprEnum = ResourceOperationTypeEnum.UNLINK_SNAPSHOT_SESSION_TARGETS;
SnapshotSessionUnlinkTargetsParam unlinkParam = new SnapshotSessionUnlinkTargetsParam(getUnlinkTargetIdsForSession((VolumeGroupSnapshotSessionUnlinkTargetsParam) param, session));
taskList.addTask(_blockConsistencyGroupService.unlinkTargetVolumesForSession(cgUri, sessionUri, unlinkParam));
break;
default:
log.error("Unsupported operation {}", opType.getDescription());
break;
}
} catch (InternalException | APIException e) {
// Known service errors: record a failed task for this session and continue
// with the remaining replication groups.
String errMsg = String.format("Exception occurred while performing %s on Replication group %s", opType.getDescription(), cell.getColumnKey());
log.error(errMsg, e);
TaskResourceRep task = BlockServiceUtils.createFailedTaskOnSnapshotSession(_dbClient, session, oprEnum, e);
taskList.addTask(task);
} catch (Exception ex) {
// Unexpected errors are logged but produce no failed task for this session.
String errMsg = String.format("Unexpected Exception occurred while performing %s on Replication group %s", opType.getDescription(), cell.getColumnKey());
log.error(errMsg, ex);
}
}
auditOp(opType, true, AuditLogManager.AUDITOP_END, volumeGroupId.toString(), param.getSnapshotSessions());
return taskList;
}
Use of com.emc.storageos.svcs.errorhandling.resources.APIException in the coprhd-controller project by CoprHD.
From the class BlockService, method changeVolumesVirtualPool.
/**
 * Allows the caller to change the virtual pool for the volumes identified in
 * the request. Currently, the only virtual pool changes that are supported via
 * this method are as follows:
 *
 * Change the virtual pool for a VPLEX virtual volume. This virtual pool
 * change would allow the caller to change the types of drives, for example,
 * used for the backend volume(s) that are used by the virtual volume.
 *
 * Change the virtual pool for a VPLEX virtual volume, such that a local
 * VPLEX virtual volumes becomes a distributed VPLEX virtual volume.
 *
 * Change the virtual pool of a VMAX or VNX Block volume to make the volume
 * a local or distributed VPLEX virtual volume. Essentially, the volume
 * becomes the backend volume for a VPLEX virtual volume. Similar to
 * creating a virtual volume, but instead of creating a new backend volume,
 * using the volume identified in the request. The VMAX or VNX volume cannot
 * currently be exported for this change.
 *
 * Change the virtual pool of a VMAX or VNX Block volume to make the volume
 * a RecoverPoint protected volume. The volume must be able to stay put, and
 * ViPR will build a protection around it.
 *
 * Change the virtual pool of a VMAX or VNX Block volume to allow native
 * continuous copies to be created for it.
 *
 * Change the virtual pool of a volume to increase the export path parameter max_paths.
 * The number of paths will be upgraded if possible for all Export Groups / Export Masks
 * containing this volume. If the volume is not currently exported, max_paths can be
 * decreased or paths_per_initiator can be changed. Note that changing max_paths does
 * not have any effect on the export of BlockSnapshots that were created from this volume.
 *
 * Change the virtual pool of a VMAX and VNX volumes to allow change of Auto-tiering policy
 * associated with it.
 *
 * Note: Operations other than Auto-tiering Policy change will call the
 * internal single volume method (BlockServiceApiImpl) in a loop.
 *
 * @brief Change the virtual pool for the given volumes.
 *
 * @param param
 *            the VolumeVirtualPoolChangeParam
 * @return A List of TaskResourceRep representing the virtual pool change for the
 *         volumes.
 * @throws InternalException,
 *             APIException
 */
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/vpool-change")
@CheckPermission(roles = { Role.TENANT_ADMIN }, acls = { ACL.OWN, ACL.ALL })
public TaskList changeVolumesVirtualPool(VolumeVirtualPoolChangeParam param) throws InternalException, APIException {
// verify volume ids list is provided.
List<URI> ids = param.getVolumes();
ArgValidator.checkFieldNotEmpty(ids, "volumes");
_log.info("Request to change VirtualPool for volumes {}", ids);
List<Volume> volumes = new ArrayList<Volume>();
TaskList taskList = new TaskList();
for (URI id : ids) {
// Get the volume.
ArgValidator.checkFieldUriType(id, Volume.class, "volume");
Volume volume = queryVolumeResource(id);
volumes.add(volume);
// Make sure that we don't have some pending
// operation against the volume
checkForPendingTasks(Arrays.asList(volume.getTenant().getURI()), Arrays.asList(volume));
}
_log.info("Found volumes");
/**
 * verify that all volumes belong to same vPool.
 *
 * If so and vPool change detects it as Auto-tiering policy change,
 * then they are of same system type.
 *
 * Special case: If the request contains a VMAX volume and a VNX volume
 * belonging to a generic vPool and the target vPool has some VMAX FAST policy,
 * the below verifyVirtualPoolChangeSupportedForVolumeAndVirtualPool() check will
 * throw error for VNX volume (saying it does not come under any valid change).
 */
verifyAllVolumesBelongToSameVpool(volumes);
// target vPool
VirtualPool vPool = null;
// total provisioned capacity to check for vPool quota.
long totalProvisionedCapacity = 0;
// Per-volume validation; vPool is resolved inside the loop but is the same
// for every iteration since all volumes share the same source vPool.
for (Volume volume : volumes) {
_log.info("Checking on volume: {}", volume.getId());
// Don't operate on VPLEX backend or RP Journal volumes.
BlockServiceUtils.validateNotAnInternalBlockObject(volume, param.getForceFlag());
// Don't operate on ingested volumes.
VolumeIngestionUtil.checkOperationSupportedOnIngestedVolume(volume, ResourceOperationTypeEnum.CHANGE_BLOCK_VOLUME_VPOOL, _dbClient);
// Get the project.
URI projectURI = volume.getProject().getURI();
Project project = _permissionsHelper.getObjectById(projectURI, Project.class);
ArgValidator.checkEntity(project, projectURI, false);
_log.info("Found volume project {}", projectURI);
// Verify the user is authorized for the volume's project.
BlockServiceUtils.verifyUserIsAuthorizedForRequest(project, getUserFromContext(), _permissionsHelper);
_log.info("User is authorized for volume's project");
// Get the VirtualPool for the request and verify that the
// project's tenant has access to the VirtualPool.
vPool = getVirtualPoolForRequest(project, param.getVirtualPool(), _dbClient, _permissionsHelper);
_log.info("Found new VirtualPool {}", vPool.getId());
// Verify that the VirtualPool change is allowed for the
// requested volume and VirtualPool.
verifyVirtualPoolChangeSupportedForVolumeAndVirtualPool(volume, vPool);
_log.info("VirtualPool change is supported for requested volume and VirtualPool");
totalProvisionedCapacity += volume.getProvisionedCapacity().longValue();
}
verifyAllVolumesInCGRequirement(volumes, vPool);
// verify target vPool quota
if (!CapacityUtils.validateVirtualPoolQuota(_dbClient, vPool, totalProvisionedCapacity)) {
throw APIException.badRequests.insufficientQuotaForVirtualPool(vPool.getLabel(), "volume");
}
// Create a unique task id.
String taskId = UUID.randomUUID().toString();
// if this vpool request change has a consistency group, set its requested types
if (param.getConsistencyGroup() != null) {
BlockConsistencyGroup cg = _dbClient.queryObject(BlockConsistencyGroup.class, param.getConsistencyGroup());
if (cg != null && !cg.getInactive()) {
cg.getRequestedTypes().addAll(getRequestedTypes(vPool));
_dbClient.updateObject(cg);
}
}
// to execute the VirtualPool update on the volume.
try {
/**
 * If it is Auto-tiering policy change, the system type remains same
 * between source and target vPools.
 * Volumes from single vPool would be of same characteristics and
 * all would specify same operation.
 */
BlockServiceApi blockServiceAPI = getBlockServiceImplForVirtualPoolChange(volumes.get(0), vPool);
_log.info("Got block service implementation for VirtualPool change request");
VirtualPoolChangeParam oldParam = convertNewVirtualPoolChangeParamToOldParam(param);
TaskList taskList2 = blockServiceAPI.changeVolumeVirtualPool(volumes, vPool, oldParam, taskId);
if (taskList2 != null && !taskList2.getTaskList().isEmpty()) {
taskList.getTaskList().addAll(taskList2.getTaskList());
}
_log.info("Executed VirtualPool change for given volumes.");
} catch (Exception e) {
// On failure, mark every created task as errored; if the service API
// produced no tasks, record the error against each volume directly.
String errorMsg = String.format("Volume VirtualPool change error: %s", e.getMessage());
_log.error(errorMsg, e);
if (!taskList.getTaskList().isEmpty()) {
for (TaskResourceRep volumeTask : taskList.getTaskList()) {
volumeTask.setState(Operation.Status.error.name());
volumeTask.setMessage(errorMsg);
_dbClient.updateTaskOpStatus(Volume.class, volumeTask.getResource().getId(), taskId, new Operation(Operation.Status.error.name(), errorMsg));
}
} else {
for (Volume volume : volumes) {
_dbClient.updateTaskOpStatus(Volume.class, volume.getId(), taskId, new Operation(Operation.Status.error.name(), errorMsg));
}
}
throw e;
}
// Record Audit operation.
for (Volume volume : volumes) {
auditOp(OperationTypeEnum.CHANGE_VOLUME_VPOOL, true, AuditLogManager.AUDITOP_BEGIN, volume.getLabel(), 1, volume.getVirtualArray().toString(), volume.getProject().toString());
}
return taskList;
}
Use of com.emc.storageos.svcs.errorhandling.resources.APIException in the coprhd-controller project by CoprHD.
From the class BlockService, method deleteVolumes.
/**
 * This API allows the user to deactivate multiple volumes in a single request.
 * There is no restriction on the volumes specified in the request. The volumes
 * can reside in multiple storage pools on multiple storage systems. The
 * response will contain a task resource for each volume to be
 * deactivated. The volumes will be deleted from the database when
 * all references to the volumes of type BlockSnapshot and
 * ExportGroup are deleted.
 *
 * If "?force=true" is added to the path, it will force the delete of internal
 * volumes that have the SUPPORTS_FORCE flag.
 *
 * If "?type=VIPR_ONLY" is added to the path, it will delete volumes only from ViPR data base and leaves the volume on storage array as
 * it is.
 * Possible value for the attribute type : FULL, VIPR_ONLY
 * FULL : Deletes the volumes permanently on array and ViPR data base.
 * VIPR_ONLY : Deletes the volumes only from ViPR data base and leaves the volumes on array as it is.
 *
 * NOTE: This is an asynchronous operation.
 *
 * @prereq Dependent volume resources such as snapshots and export groups must be deleted
 *
 * @param volumeURIs
 *            The POST data specifying the ids of the volume(s) to be
 *            deleted.
 * @param force {@link DefaultValue} false
 * @param type {@link DefaultValue} FULL
 *
 * @brief Delete multiple volumes
 * @return A reference to a BlockTaskList containing a list of
 *         TaskResourceRep instances specifying the task data for each
 *         volume delete task.
 * @throws InternalException
 */
@POST
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/deactivate")
public TaskList deleteVolumes(BulkDeleteParam volumeURIs, @DefaultValue("false") @QueryParam("force") boolean force,
        @DefaultValue("FULL") @QueryParam("type") String type) throws InternalException {
    // Verify that some volumes were passed in the request.
    BlockService.checkVolumesParameter(volumeURIs);
    // For volume operations, user needs to have the TENANT_ADMIN role or proper ACLs.
    StorageOSUser user = getUserFromContext();
    Iterator<Volume> dbVolumeIter = _dbClient.queryIterativeObjects(getResourceClass(), volumeURIs.getIds());
    Set<URI> tenantSet = new HashSet<>();
    List<Volume> volumes = new ArrayList<Volume>();
    while (dbVolumeIter.hasNext()) {
        Volume vol = dbVolumeIter.next();
        // Don't operate on VPLEX backend or RP Journal volumes (unless forced to).
        BlockServiceUtils.validateNotAnInternalBlockObject(vol, force);
        // Don't operate on volumes with boot volume tags (unless forced to).
        BlockServiceUtils.validateNotABootVolume(vol, force);
        if (!_permissionsHelper.userHasGivenRole(user, vol.getTenant().getURI(), Role.TENANT_ADMIN)
                && !_permissionsHelper.userHasGivenACL(user, vol.getProject().getURI(), ACL.OWN, ACL.ALL)) {
            throw APIException.forbidden.insufficientPermissionsForUser(user.getName());
        }
        tenantSet.add(vol.getTenant().getURI());
        volumes.add(vol);
    }
    // Make sure there is no pending operation against any of the volumes
    // (skipped when the force flag is set).
    if (!force) {
        checkForPendingTasks(tenantSet, volumes);
    }
    // Volumes on different storage systems need to be deleted with
    // separate, individual calls to the controller. Therefore, we
    // need to map the volumes passed to the storage systems on
    // which they reside.
    Map<URI, List<URI>> systemVolumesMap = new HashMap<URI, List<URI>>();
    // We will create a task resource response for each volume to
    // be deleted and initialize it to the pending state. If there
    // is a controller error deleting the volumes on a given storage
    // system, we need to update the responses associated with these
    // volumes to specify an error state.
    Map<URI, List<TaskResourceRep>> systemTaskResourceRepsMap = new HashMap<URI, List<TaskResourceRep>>();
    // We create a global task list containing the task resource response
    // for all volumes, which will be returned as the request response.
    TaskList taskList = new TaskList();
    // First pass: validation only, before any task is created, so a failure
    // here leaves no tasks that would seem to be "stuck".
    for (Volume volume : volumes) {
        URI volumeURI = volume.getId();
        ArgValidator.checkEntity(volume, volumeURI, isIdEmbeddedInURL(volumeURI));
        BlockServiceApi blockServiceApi = getBlockServiceImpl(volume);
        /**
         * Delete volume api call will delete the replica objects as part of volume delete call for vmax using SMI
         * 8.0.3.
         * Hence we don't require reference check for vmax.
         */
        if ((VolumeDeleteTypeEnum.VIPR_ONLY.name().equals(type)) || (!volume.isInCG())
                || (!BlockServiceUtils.checkCGVolumeCanBeAddedOrRemoved(null, volume, _dbClient))) {
            List<Class<? extends DataObject>> excludeTypes = null;
            if (VolumeDeleteTypeEnum.VIPR_ONLY.name().equals(type)) {
                // For ViPR-only delete of exported volumes, we will clean up any
                // export groups/masks if the volume is exported.
                excludeTypes = new ArrayList<>();
                excludeTypes.add(ExportGroup.class);
                excludeTypes.add(ExportMask.class);
            }
            // If we are deleting a boot volume, there may still be a reference to the volume
            // in the decommissioned host. Since the force flag is used, we will clear out this
            // reference in the host so the volume can be deleted.
            List<Host> hosts = CustomQueryUtility.queryActiveResourcesByRelation(_dbClient, volume.getId(), Host.class,
                    "bootVolumeId");
            if (hosts != null) {
                for (Host host : hosts) {
                    host.setBootVolumeId(NullColumnValueGetter.getNullURI());
                    _log.info("Removing boot volume ID from host: {}", host.forDisplay());
                    _dbClient.updateObject(host);
                }
            }
            ArgValidator.checkReference(Volume.class, volumeURI, blockServiceApi.checkForDelete(volume, excludeTypes));
        }
        // If the volume has attached full copies, deleting the volume may not be allowed.
        if ((!VolumeDeleteTypeEnum.VIPR_ONLY.name().equals(type)) && (!getFullCopyManager().volumeCanBeDeleted(volume))) {
            throw APIException.badRequests.cantDeleteFullCopyNotDetached(volume.getLabel());
        }
    }
    // since we issue one controller request per storage system, we must give each storage system
    // a separate task id. Otherwise, we will create multiple workflows with the same task id
    // which is not allowed.
    // this maps task ids to their storage systems
    Map<URI, String> systemURITaskIdMap = new HashMap<URI, String>();
    // Now loop over the volumes, initializing the above constructs.
    for (Volume volume : volumes) {
        URI volumeURI = volume.getId();
        // If the volume has active associated volumes, try to deactivate regardless
        // of native ID or inactive state. This basically means it's a VPLEX volume.
        boolean forceDeactivate = checkIfVplexVolumeHasActiveAssociatedVolumes(volume);
        if (forceDeactivate || (!Strings.isNullOrEmpty(volume.getNativeId()) && !volume.getInactive())) {
            URI systemURI = null;
            // RP-protected volumes are handled by the protection controller.
            if (!isNullURI(volume.getProtectionController())) {
                systemURI = volume.getProtectionController();
            } else {
                systemURI = volume.getStorageController();
            }
            if (systemURITaskIdMap.get(systemURI) == null) {
                systemURITaskIdMap.put(systemURI, UUID.randomUUID().toString());
            }
            String task = systemURITaskIdMap.get(systemURI);
            // Create a task resource response for this volume and
            // set the initial task state to pending.
            // Initialize volume delete task status.
            Operation op = _dbClient.createTaskOpStatus(Volume.class, volume.getId(), task,
                    ResourceOperationTypeEnum.DELETE_BLOCK_VOLUME);
            TaskResourceRep volumeTaskResourceRep = toTask(volume, task, op);
            List<URI> systemVolumeURIs = systemVolumesMap.get(systemURI);
            if (systemVolumeURIs == null) {
                // Create a list to hold the volumes for the
                // system, add the volume to the list, and put
                // the list in the system volumes map.
                systemVolumeURIs = new ArrayList<URI>();
                systemVolumeURIs.add(volumeURI);
                systemVolumesMap.put(systemURI, systemVolumeURIs);
                // Build a list to hold the task resource responses for
                // the system. Create a task resource response for
                // this volume and add it to the list for the system.
                // Put the list for the system into the map.
                List<TaskResourceRep> systemTaskResourceReps = new ArrayList<TaskResourceRep>();
                systemTaskResourceReps.add(volumeTaskResourceRep);
                systemTaskResourceRepsMap.put(systemURI, systemTaskResourceReps);
            } else if (!systemVolumeURIs.contains(volumeURI)) {
                // Add the volume to the system's volume list if it has
                // not already been added. Duplicates are just ignored.
                systemVolumeURIs.add(volumeURI);
                List<TaskResourceRep> systemTaskResourceReps = systemTaskResourceRepsMap.get(systemURI);
                systemTaskResourceReps.add(volumeTaskResourceRep);
            }
            // Add the task resource response for the volume to the global list
            // to be returned.
            taskList.getTaskList().add(volumeTaskResourceRep);
        } else if (!volume.getInactive()) {
            // somehow no nativeId is set on volume, but it was active. Set it to not active
            volume.setInactive(true);
            _dbClient.updateObject(volume);
        }
    }
    // Try and delete the volumes on each system.
    Iterator<URI> systemsURIIter = systemVolumesMap.keySet().iterator();
    while (systemsURIIter.hasNext()) {
        URI systemURI = systemsURIIter.next();
        String task = systemURITaskIdMap.get(systemURI);
        try {
            List<URI> systemVolumes = systemVolumesMap.get(systemURI);
            BlockServiceApi blockServiceApi = getBlockServiceImpl(queryVolumeResource(systemVolumes.get(0)));
            blockServiceApi.deleteVolumes(systemURI, systemVolumes, type, task);
        } catch (APIException | InternalException e) {
            // A controller failure for one system fails only that system's tasks;
            // deletion continues for the remaining systems.
            if (_log.isErrorEnabled()) {
                _log.error("Delete error", e);
            }
            List<TaskResourceRep> systemTaskResourceReps = systemTaskResourceRepsMap.get(systemURI);
            for (TaskResourceRep volumeTask : systemTaskResourceReps) {
                volumeTask.setState(Operation.Status.error.name());
                volumeTask.setMessage(e.getMessage());
                _dbClient.updateTaskOpStatus(Volume.class, volumeTask.getResource().getId(), task,
                        new Operation(Operation.Status.error.name(), e.getMessage()));
            }
        }
    }
    auditOp(OperationTypeEnum.DELETE_BLOCK_VOLUME, true, AuditLogManager.AUDITOP_MULTI_BEGIN);
    return taskList;
}
Use of com.emc.storageos.svcs.errorhandling.resources.APIException in the coprhd-controller project by CoprHD.
From the class BlockService, method changeVirtualArrayForVolumes.
/**
 * Changes the virtual array for the passed volumes to the passed
 * target virtual array.
 *
 * @param volumeURIs
 *            The URIs of the volumes to move
 * @param tgtVarrayURI
 *            The URI of the target virtual array
 *
 * @return A TaskList of the tasks associated with each volume being moved.
 *
 * @throws InternalException,
 *             APIException
 */
private TaskList changeVirtualArrayForVolumes(List<URI> volumeURIs, URI tgtVarrayURI)
        throws InternalException, APIException {
    // Create the result.
    TaskList taskList = new TaskList();
    // Create a unique task id.
    String taskId = UUID.randomUUID().toString();
    // Validate that each of the volumes passed in is eligible
    // for the varray change.
    VirtualArray tgtVarray = null;
    BlockConsistencyGroup cg = null;
    BlockServiceApi blockServiceAPI = null;
    List<Volume> volumes = new ArrayList<Volume>();
    List<Volume> cgVolumes = new ArrayList<Volume>();
    boolean foundVolumeNotInCG = false;
    for (URI volumeURI : volumeURIs) {
        // Get and verify the volume.
        ArgValidator.checkFieldUriType(volumeURI, Volume.class, "volume");
        Volume volume = queryVolumeResource(volumeURI);
        ArgValidator.checkEntity(volume, volumeURI, false);
        _log.info("Found volume {}", volumeURI);
        // Don't operate on VPLEX backend or RP Journal volumes.
        BlockServiceUtils.validateNotAnInternalBlockObject(volume, false);
        // Don't operate on ingested volumes.
        VolumeIngestionUtil.checkOperationSupportedOnIngestedVolume(volume,
                ResourceOperationTypeEnum.CHANGE_BLOCK_VOLUME_VARRAY, _dbClient);
        // Get and validate the volume's project.
        URI projectURI = volume.getProject().getURI();
        Project project = _permissionsHelper.getObjectById(projectURI, Project.class);
        ArgValidator.checkEntity(project, projectURI, false);
        _log.info("Found volume project {}", projectURI);
        // Verify the user is authorized for the volume's project.
        BlockServiceUtils.verifyUserIsAuthorizedForRequest(project, getUserFromContext(), _permissionsHelper);
        _log.info("User is authorized for volume's project");
        // Verify the current and requested virtual arrays are not the same.
        if (volume.getVirtualArray().equals(tgtVarrayURI)) {
            throw APIException.badRequests.currentAndRequestedVArrayAreTheSame();
        }
        // Get and validate the target virtual array (only once).
        if (tgtVarray == null) {
            tgtVarray = BlockServiceUtils.verifyVirtualArrayForRequest(project, tgtVarrayURI, uriInfo,
                    _permissionsHelper, _dbClient);
            _log.info("Found new VirtualArray {}", tgtVarrayURI);
        }
        // Make sure that we don't have some pending
        // operation against the volume
        checkForPendingTasks(Arrays.asList(volume.getTenant().getURI()), Arrays.asList(volume));
        // Get the appropriate block service implementation for the
        // volume. Note that this same implementation is used to
        // execute the change. If it is possible that volumes
        // with multiple implementations can be selected for a
        // varray change, then we would need a map of the
        // implementation to use for a given volume. However,
        // currently only VPLEX volumes can be moved, so valid
        // volumes for a varray change will always have the same
        // implementation.
        blockServiceAPI = getBlockServiceImpl(volume);
        // Verify that the virtual array change is allowed for the
        // requested volume and virtual array.
        blockServiceAPI.verifyVarrayChangeSupportedForVolumeAndVarray(volume, tgtVarray);
        _log.info("Virtual array change is supported for requested volume and varray");
        // All volumes must be a CG or none of the volumes can be
        // in a CG. After processing individual volumes, if the
        // volumes are in a CG, then we make sure all volumes in the
        // CG and only the volumes in the CG are passed.
        URI cgURI = volume.getConsistencyGroup();
        if ((cg == null) && (!foundVolumeNotInCG)) {
            if (!isNullURI(cgURI)) {
                cg = _permissionsHelper.getObjectById(cgURI, BlockConsistencyGroup.class);
                _log.info("All volumes should be in CG {}:{}", cgURI, cg.getLabel());
                cgVolumes.addAll(blockServiceAPI.getActiveCGVolumes(cg));
            } else {
                _log.info("No volumes should be in CGs");
                foundVolumeNotInCG = true;
            }
        } else if (((cg != null) && (isNullURI(cgURI))) || ((foundVolumeNotInCG) && (!isNullURI(cgURI)))) {
            // A volume was in a CG, so all volumes must be in a CG.
            if (cg != null) {
                // Volumes should all be in the CG and this one is not.
                _log.error("Volume {}:{} is not in the CG", volumeURI, volume.getLabel());
            } else {
                _log.error("Volume {}:{} is in CG {}", new Object[] { volumeURI, volume.getLabel(), cgURI });
            }
            throw APIException.badRequests.mixedVolumesinCGForVarrayChange();
        }
        // Add the volume to the list
        volumes.add(volume);
    }
    // If the volumes are in a CG, verify they are all in the same CG
    // and that all CG volumes are passed.
    if (cg != null) {
        // all volume in CG must have been passed.
        _log.info("Verify all volumes in CG {}:{}", cg.getId(), cg.getLabel());
        URI storageId = cg.getStorageController();
        if (!NullColumnValueGetter.isNullURI(storageId)) {
            StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageId);
            if (DiscoveredDataObject.Type.vplex.name().equals(storage.getSystemType())) {
                // For VPLEX, verify the passed volumes match the volumes
                // in the CG.
                if (!VPlexUtil.verifyVolumesInCG(volumes, cgVolumes, _dbClient)) {
                    throw APIException.badRequests.cantChangeVarrayNotAllCGVolumes();
                }
            } else {
                verifyVolumesInCG(volumes, cgVolumes);
            }
        } else {
            verifyVolumesInCG(volumes, cgVolumes);
        }
    }
    // Create a task for each volume and set the initial
    // task state to pending.
    for (Volume volume : volumes) {
        Operation op = _dbClient.createTaskOpStatus(Volume.class, volume.getId(), taskId,
                ResourceOperationTypeEnum.CHANGE_BLOCK_VOLUME_VARRAY);
        TaskResourceRep resourceTask = toTask(volume, taskId, op);
        taskList.addTask(resourceTask);
    }
    // Now execute the varray change for the volumes.
    if (cg != null) {
        try {
            // When the volumes are part of a CG, executed as a single workflow.
            blockServiceAPI.changeVirtualArrayForVolumes(volumes, cg, cgVolumes, tgtVarray, taskId);
            _log.info("Executed virtual array change for volumes");
        } catch (InternalException | APIException e) {
            // Fail all the tasks.
            String errorMsg = String.format("Volume virtual array change error: %s", e.getMessage());
            _log.error(errorMsg, e);
            for (TaskResourceRep resourceTask : taskList.getTaskList()) {
                resourceTask.setState(Operation.Status.error.name());
                resourceTask.setMessage(errorMsg);
                _dbClient.error(Volume.class, resourceTask.getResource().getId(), taskId, e);
            }
        } catch (Exception e) {
            // Fail all the tasks.
            String errorMsg = String.format("Volume virtual array change error: %s", e.getMessage());
            _log.error(errorMsg, e);
            for (TaskResourceRep resourceTask : taskList.getTaskList()) {
                resourceTask.setState(Operation.Status.error.name());
                resourceTask.setMessage(errorMsg);
                _dbClient.error(Volume.class, resourceTask.getResource().getId(), taskId,
                        InternalServerErrorException.internalServerErrors.unexpectedErrorDuringVarrayChange(e));
            }
        }
    } else {
        // When the volumes are not in a CG, then execute as individual workflows.
        for (Volume volume : volumes) {
            try {
                blockServiceAPI.changeVirtualArrayForVolumes(Arrays.asList(volume), cg, cgVolumes, tgtVarray, taskId);
                _log.info("Executed virtual array change for volume {}", volume.getId());
            } catch (InternalException | APIException e) {
                String errorMsg = String.format("Volume virtual array change error: %s", e.getMessage());
                _log.error(errorMsg, e);
                for (TaskResourceRep resourceTask : taskList.getTaskList()) {
                    // Fail the correct task.
                    if (resourceTask.getResource().getId().equals(volume.getId())) {
                        resourceTask.setState(Operation.Status.error.name());
                        resourceTask.setMessage(errorMsg);
                        _dbClient.error(Volume.class, resourceTask.getResource().getId(), taskId, e);
                    }
                }
            } catch (Exception e) {
                String errorMsg = String.format("Volume virtual array change error: %s", e.getMessage());
                _log.error(errorMsg, e);
                for (TaskResourceRep resourceTask : taskList.getTaskList()) {
                    // Fail the correct task.
                    if (resourceTask.getResource().getId().equals(volume.getId())) {
                        resourceTask.setState(Operation.Status.error.name());
                        resourceTask.setMessage(errorMsg);
                        _dbClient.error(Volume.class, resourceTask.getResource().getId(), taskId,
                                InternalServerErrorException.internalServerErrors.unexpectedErrorDuringVarrayChange(e));
                    }
                }
            }
        }
    }
    return taskList;
}
Aggregations