Example usage of com.emc.storageos.svcs.errorhandling.resources.APIException in the CoprHD project coprhd-controller: class VolumeGroupService, method activateVolumeGroupFullCopy.
/**
 * Activate the specified Volume group full copy.
 * - Activates full copy for all the array replication groups within this Application.
 * - If the partial flag is specified, it activates full copy only for a set of array replication groups.
 * A Full Copy from each array replication group can be provided to indicate which array replication
 * groups' full copies need to be activated.
 *
 * @prereq Create Volume group full copy as inactive.
 *
 * @param volumeGroupId The URI of the Volume group.
 * @param param Request parameters identifying the full copies to activate.
 *
 * @brief Activate Volume group full copy.
 *
 * @return TaskList containing one or more tasks per array replication group processed.
 */
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/{id}/protection/full-copies/activate")
@CheckPermission(roles = { Role.TENANT_ADMIN }, acls = { ACL.ANY })
public TaskList activateVolumeGroupFullCopy(@PathParam("id") final URI volumeGroupId, final VolumeGroupFullCopyActivateParam param) {
    ArgValidator.checkFieldUriType(volumeGroupId, VolumeGroup.class, "id");
    // Query the Volume Group and make sure a full-copy operation is allowed on it.
    final VolumeGroup volumeGroup = (VolumeGroup) queryResource(volumeGroupId);
    TaskList response = new TaskList();
    validateCopyOperationForVolumeGroup(volumeGroup, ReplicaTypeEnum.FULL_COPY);
    // Validate the requested full copies; 'isPartialRequest' indicates only a subset
    // of the array replication groups was requested.
    List<Volume> requestedFullCopies = new ArrayList<Volume>();
    boolean isPartialRequest = validateFullCopiesInRequest(requestedFullCopies, param.getFullCopies(), param.getCopySetName(), param.getSubGroups(), volumeGroupId);
    /**
     * 1. VolumeGroupService Clone API accepts a Clone URI (to identify clone set and RG)
     * - then get All full copies belonging to same full copy set
     * - get full copy set name from the requested full copy
     * 2. If partial, there will be a List of Clone URIs (one from each RG)
     * 3. Group the full copies by Replication Group(RG)
     * 4. For each RG, invoke the ConsistencyGroup full copy API (CG uri, clone uri)
     * - a. Skip the CG/RG calls when thrown error and continue with other entries; create 'ERROR' Task for this call
     * - b. Finally return the Task List (RG tasks may finish at different times as they are different calls)
     */
    if (isPartialRequest) {
        log.info("Full Copy operation requested for subset of array replication groups in Application.");
    } else {
        // Whole-application request: expand the single requested copy into every
        // full copy that belongs to the same full-copy set.
        Volume representative = requestedFullCopies.get(0);
        log.info("Full Copy operation requested for entire Application, Considering full copy {} in request.", representative.getLabel());
        requestedFullCopies.clear();
        requestedFullCopies.addAll(getClonesBySetName(representative.getFullCopySetName(), volumeGroup.getId()));
    }
    // Process one full copy per array replication group.
    Map<String, Volume> fullCopyByRepGroup = groupVolumesByReplicationGroup(requestedFullCopies);
    for (Map.Entry<String, Volume> rgEntry : fullCopyByRepGroup.entrySet()) {
        String rgName = rgEntry.getKey();
        Volume rgFullCopy = rgEntry.getValue();
        log.info("Processing Array Replication Group {}, Full Copy {}", rgName, rgFullCopy.getLabel());
        try {
            // Activate via the consistency group API; all volumes in the same
            // ReplicationGroup as this full copy will be activated together.
            URI cgUri = getConsistencyGroupForFullCopy(rgFullCopy);
            TaskList cgTasks = _blockConsistencyGroupService.activateConsistencyGroupFullCopy(cgUri, rgFullCopy.getId());
            response.getTaskList().addAll(cgTasks.getTaskList());
        } catch (InternalException | APIException e) {
            // One RG failing must not stop the others: record a failed task and continue.
            String errMsg = String.format("Error activating Array Replication Group %s, Full Copy %s", rgName, rgFullCopy.getLabel());
            log.error(errMsg, e);
            TaskResourceRep failedTask = BlockServiceUtils.createFailedTaskOnVolume(_dbClient, rgFullCopy, ResourceOperationTypeEnum.ACTIVATE_VOLUME_FULL_COPY, e);
            response.addTask(failedTask);
        }
    }
    if (!isPartialRequest) {
        auditOp(OperationTypeEnum.ACTIVATE_VOLUME_GROUP_FULL_COPY, true, AuditLogManager.AUDITOP_BEGIN, volumeGroup.getId().toString(), requestedFullCopies.get(0).getLabel());
    }
    return response;
}
Example usage of com.emc.storageos.svcs.errorhandling.resources.APIException in the CoprHD project coprhd-controller: class IngestVolumesExportedSchedulingThread, method run.
/**
 * Ingests each exported unmanaged volume in the request context, then ingests the
 * associated unmanaged export masks, and finally persists all created/updated objects
 * and records task status. Per-volume failures roll back that volume's context and
 * mark its task in error; the remaining volumes continue to be processed. Any export
 * group that was created during the request but ended up with no volumes is marked
 * for deletion in the finally block (CTRL-8520).
 */
@Override
public void run() {
    try {
        _requestContext.reset();
        URI varrayId = null;
        while (_requestContext.hasNext()) {
            UnManagedVolume unManagedVolume = _requestContext.next();
            _logger.info("Ingestion starting for exported unmanaged volume {}", unManagedVolume.getNativeGuid());
            if (null == varrayId) {
                varrayId = _requestContext.getVarray(unManagedVolume).getId();
            }
            // taskId may be null if no task was registered for this unmanaged volume
            TaskResourceRep resourceRep = _taskMap.get(unManagedVolume.getId().toString());
            String taskId = resourceRep != null ? resourceRep.getOpId() : null;
            try {
                // Resolve the owning storage system, caching it on the request context
                // to avoid repeated DB lookups across volumes on the same array.
                URI storageSystemUri = unManagedVolume.getStorageSystemUri();
                StorageSystem system = _requestContext.getStorageSystemCache().get(storageSystemUri.toString());
                if (null == system) {
                    system = _dbClient.queryObject(StorageSystem.class, storageSystemUri);
                    _requestContext.getStorageSystemCache().put(storageSystemUri.toString(), system);
                }
                // Build the Strategy , which contains reference to Block object & export orchestrators
                IngestStrategy ingestStrategy = _ingestStrategyFactory.buildIngestStrategy(unManagedVolume, !IngestStrategyFactory.DISREGARD_PROTECTION);
                @SuppressWarnings("unchecked")
                BlockObject blockObject = ingestStrategy.ingestBlockObjects(_requestContext, VolumeIngestionUtil.getBlockObjectClass(unManagedVolume));
                if (null == blockObject) {
                    throw IngestionException.exceptions.generalVolumeException(unManagedVolume.getLabel(), "check the logs for more details");
                }
                // Track the ingested block object and its volume context for the
                // export-mask ingestion and commit phases below.
                _requestContext.getBlockObjectsToBeCreatedMap().put(blockObject.getNativeGuid(), blockObject);
                _requestContext.getProcessedUnManagedVolumeMap().put(unManagedVolume.getNativeGuid(), _requestContext.getVolumeContext());
                _logger.info("Volume ingestion completed successfully for exported unmanaged volume {} (export ingestion will follow)", unManagedVolume.getNativeGuid());
            } catch (APIException ex) {
                // API exceptions already carry a service error; report as-is.
                _logger.error("error: " + ex.getLocalizedMessage(), ex);
                _dbClient.error(UnManagedVolume.class, _requestContext.getCurrentUnManagedVolumeUri(), taskId, ex);
                _requestContext.getVolumeContext().rollback();
            } catch (Exception ex) {
                // Wrap unexpected exceptions in an ingestion service error.
                _logger.error("error: " + ex.getLocalizedMessage(), ex);
                _dbClient.error(UnManagedVolume.class, _requestContext.getCurrentUnManagedVolumeUri(), taskId, IngestionException.exceptions.generalVolumeException(unManagedVolume.getLabel(), ex.getLocalizedMessage()));
                _requestContext.getVolumeContext().rollback();
            }
        }
        _logger.info("Ingestion of all the unmanaged volumes has completed.");
        // next ingest the export masks for the unmanaged volumes which have been fully ingested
        _logger.info("Ingestion of unmanaged export masks for all requested volumes starting.");
        ingestBlockExportMasks(_requestContext, _taskMap);
        for (VolumeIngestionContext volumeContext : _requestContext.getProcessedUnManagedVolumeMap().values()) {
            // If there is a CG involved in the ingestion, organize, pollenate, and commit.
            _unManagedVolumeService.commitIngestedCG(_requestContext, volumeContext.getUnmanagedVolume());
            // commit the volume itself
            volumeContext.commit();
        }
        for (BlockObject bo : _requestContext.getObjectsIngestedByExportProcessing()) {
            _logger.info("Ingestion Wrap Up: Creating BlockObject {} (hash {})", bo.forDisplay(), bo.hashCode());
            _dbClient.createObject(bo);
        }
        for (UnManagedVolume umv : _requestContext.getUnManagedVolumesToBeDeleted()) {
            // NOTE: deletion here is a soft delete — presumably these have been marked
            // inactive upstream and updateObject persists that state; confirm.
            _logger.info("Ingestion Wrap Up: Deleting UnManagedVolume {} (hash {})", umv.forDisplay(), umv.hashCode());
            _dbClient.updateObject(umv);
        }
        // Update the related objects if any after successful export mask ingestion
        for (Entry<String, Set<DataObject>> updatedObjectsEntry : _requestContext.getDataObjectsToBeUpdatedMap().entrySet()) {
            if (updatedObjectsEntry != null) {
                _logger.info("Ingestion Wrap Up: Updating objects for UnManagedVolume URI " + updatedObjectsEntry.getKey());
                for (DataObject dob : updatedObjectsEntry.getValue()) {
                    if (dob.getInactive()) {
                        _logger.info("Ingestion Wrap Up: Deleting DataObject {} (hash {})", dob.forDisplay(), dob.hashCode());
                    } else {
                        _logger.info("Ingestion Wrap Up: Updating DataObject {} (hash {})", dob.forDisplay(), dob.hashCode());
                    }
                    _dbClient.updateObject(dob);
                }
            }
        }
        // Create the related objects if any after successful export mask ingestion
        for (Set<DataObject> createdObjects : _requestContext.getDataObjectsToBeCreatedMap().values()) {
            if (createdObjects != null && !createdObjects.isEmpty()) {
                for (DataObject dob : createdObjects) {
                    _logger.info("Ingestion Wrap Up: Creating DataObject {} (hash {})", dob.forDisplay(), dob.hashCode());
                    _dbClient.createObject(dob);
                }
            }
        }
        // Persist the export group — created fresh by this request, or updated if reused.
        ExportGroup exportGroup = _requestContext.getExportGroup();
        if (_requestContext.isExportGroupCreated()) {
            _logger.info("Ingestion Wrap Up: Creating ExportGroup {} (hash {})", exportGroup.forDisplay(), exportGroup.hashCode());
            _dbClient.createObject(exportGroup);
        } else {
            _logger.info("Ingestion Wrap Up: Updating ExportGroup {} (hash {})", exportGroup.forDisplay(), exportGroup.hashCode());
            _dbClient.updateObject(exportGroup);
        }
        // record the events after they have been persisted
        for (BlockObject volume : _requestContext.getObjectsIngestedByExportProcessing()) {
            _unManagedVolumeService.recordVolumeOperation(_dbClient, _unManagedVolumeService.getOpByBlockObjectType(volume), Status.ready, volume.getId());
        }
    } catch (InternalException e) {
        // FIX: previous message ended with "due to: {}" — SLF4J consumes a trailing
        // Throwable as the exception (printing its stack trace), so the literal "{}"
        // appeared in the log output. Log a plain message with the throwable instead.
        _logger.error("InternalException occurred", e);
        throw e;
    } catch (Exception e) {
        _logger.error("Unexpected exception occurred", e);
        throw APIException.internalServerErrors.genericApisvcError(ExceptionUtils.getExceptionMessage(e), e);
    } finally {
        // If an export group was created during this request but no volumes were
        // ingested into it, then we should clean it up in the database (CTRL-8520)
        if ((null != _requestContext) && _requestContext.isExportGroupCreated() && _requestContext.getObjectsIngestedByExportProcessing().isEmpty()) {
            _logger.info("Ingestion Wrap Up: an export group was created, but no volumes were ingested into it");
            if (_requestContext.getExportGroup().getVolumes() == null || _requestContext.getExportGroup().getVolumes().isEmpty()) {
                _logger.info("Ingestion Wrap Up: since no volumes are present, marking {} for deletion", _requestContext.getExportGroup().getLabel());
                _dbClient.markForDeletion(_requestContext.getExportGroup());
            }
        }
    }
}
Example usage of com.emc.storageos.svcs.errorhandling.resources.APIException in the CoprHD project coprhd-controller: class IngestVolumesExportedSchedulingThread, method ingestBlockExportMasks.
/**
 * Ingest block export masks for the already-ingested Volumes.
 *
 * For each processed unmanaged volume: builds an export-ingestion strategy, ingests
 * the export masks, and marks the volume's task ready or in error. Per-volume
 * failures roll back that volume's context and continue with the next volume.
 *
 * @param requestContext the IngestionRequestContext
 * @param taskMap a Map of UnManagedVolume ids to TaskResourceReps
 */
private void ingestBlockExportMasks(IngestionRequestContext requestContext, Map<String, TaskResourceRep> taskMap) {
for (String unManagedVolumeGUID : requestContext.getProcessedUnManagedVolumeMap().keySet()) {
BlockObject processedBlockObject = requestContext.getProcessedBlockObject(unManagedVolumeGUID);
VolumeIngestionContext volumeContext = requestContext.getVolumeContext(unManagedVolumeGUID);
UnManagedVolume processedUnManagedVolume = volumeContext.getUnmanagedVolume();
URI unManagedVolumeUri = processedUnManagedVolume.getId();
// taskId may be null if no task was registered for this unmanaged volume
TaskResourceRep resourceRep = taskMap.get(processedUnManagedVolume.getId().toString());
String taskId = resourceRep != null ? resourceRep.getOpId() : null;
try {
// A null block object means volume ingestion failed earlier; raise so the
// catch block below records the error and rolls back this volume's context.
if (processedBlockObject == null) {
_logger.warn("The ingested block object is null. Skipping ingestion of export masks for unmanaged volume {}", unManagedVolumeGUID);
throw IngestionException.exceptions.generalVolumeException(processedUnManagedVolume.getLabel(), "check the logs for more details");
}
// Build the Strategy , which contains reference to Block object & export orchestrators
IngestExportStrategy ingestStrategy = _ingestStrategyFactory.buildIngestExportStrategy(processedUnManagedVolume);
BlockObject blockObject = ingestStrategy.ingestExportMasks(processedUnManagedVolume, processedBlockObject, requestContext);
if (null == blockObject) {
throw IngestionException.exceptions.generalVolumeException(processedUnManagedVolume.getLabel(), "check the logs for more details");
}
if (null == blockObject.getCreationTime()) {
// only add objects to create if they were created this round of ingestion,
// creationTime will be null unless the object has already been saved to the db
requestContext.getObjectsIngestedByExportProcessing().add(blockObject);
}
// If the ingested object is internal, flag an error. If it's an RP volume, it's exempt from this check.
if (blockObject.checkInternalFlags(Flag.PARTIALLY_INGESTED) && !(blockObject instanceof Volume && ((Volume) blockObject).getRpCopyName() != null)) {
StringBuffer taskStatus = requestContext.getTaskStatusMap().get(processedUnManagedVolume.getNativeGuid());
String taskMessage = "";
if (taskStatus == null) {
// No task status found. Put in a default message.
taskMessage = String.format("Not all the parent/replicas of unmanaged volume %s have been ingested", processedUnManagedVolume.getLabel());
} else {
taskMessage = taskStatus.toString();
}
_dbClient.error(UnManagedVolume.class, processedUnManagedVolume.getId(), taskId, IngestionException.exceptions.unmanagedVolumeIsNotVisible(processedUnManagedVolume.getLabel(), taskMessage));
} else {
// Fully ingested (or exempt RP volume): mark the task successful.
_dbClient.ready(UnManagedVolume.class, processedUnManagedVolume.getId(), taskId, INGESTION_SUCCESSFUL_MSG);
}
} catch (APIException ex) {
// API exceptions already carry a service error; report as-is and roll back.
_logger.warn(ex.getLocalizedMessage(), ex);
_dbClient.error(UnManagedVolume.class, unManagedVolumeUri, taskId, ex);
volumeContext.rollback();
} catch (Exception ex) {
// Wrap unexpected exceptions in an ingestion service error and roll back.
_logger.warn(ex.getLocalizedMessage(), ex);
_dbClient.error(UnManagedVolume.class, unManagedVolumeUri, taskId, IngestionException.exceptions.generalVolumeException(processedUnManagedVolume.getLabel(), ex.getLocalizedMessage()));
volumeContext.rollback();
}
}
}
Example usage of com.emc.storageos.svcs.errorhandling.resources.APIException in the CoprHD project coprhd-controller: class IngestVolumesUnexportedSchedulingThread, method run.
/**
 * Ingests each unexported unmanaged volume in the request context, then updates
 * per-volume task status, commits ingested consistency groups and per-volume
 * created/updated objects, and finally persists the ingested block objects and
 * records the volume operations. Per-volume failures roll back that volume's
 * context and continue with the next volume.
 */
@Override
public void run() {
_requestContext.reset();
while (_requestContext.hasNext()) {
UnManagedVolume unManagedVolume = _requestContext.next();
String taskId = _taskMap.get(unManagedVolume.getId().toString());
try {
_logger.info("Ingestion starting for unmanaged volume {}", unManagedVolume.getNativeGuid());
// Validate the request against the volume's vpool before ingesting.
List<URI> volList = new ArrayList<URI>();
volList.add(_requestContext.getCurrentUnManagedVolumeUri());
VolumeIngestionUtil.checkIngestionRequestValidForUnManagedVolumes(volList, _requestContext.getVpool(unManagedVolume), _dbClient);
// Build the strategy which references the block object orchestrators.
IngestStrategy ingestStrategy = _ingestStrategyFactory.buildIngestStrategy(unManagedVolume, !IngestStrategyFactory.DISREGARD_PROTECTION);
@SuppressWarnings("unchecked") BlockObject blockObject = ingestStrategy.ingestBlockObjects(_requestContext, VolumeIngestionUtil.getBlockObjectClass(unManagedVolume));
if (null == blockObject) {
throw IngestionException.exceptions.generalVolumeException(unManagedVolume.getLabel(), "check the logs for more details");
}
_logger.info("Ingestion completed successfully for unmanaged volume {}", unManagedVolume.getNativeGuid());
// Track the ingested block object and its volume context for the
// status-update and persistence phases below.
_requestContext.getBlockObjectsToBeCreatedMap().put(blockObject.getNativeGuid(), blockObject);
_requestContext.getProcessedUnManagedVolumeMap().put(unManagedVolume.getNativeGuid(), _requestContext.getVolumeContext());
} catch (APIException ex) {
// API exceptions already carry a service error; report as-is and roll back.
_logger.error("APIException occurred", ex);
_dbClient.error(UnManagedVolume.class, _requestContext.getCurrentUnManagedVolumeUri(), taskId, ex);
_requestContext.getVolumeContext().rollback();
} catch (Exception ex) {
// Wrap unexpected exceptions in an ingestion service error and roll back.
_logger.error("Exception occurred", ex);
_dbClient.error(UnManagedVolume.class, _requestContext.getCurrentUnManagedVolumeUri(), taskId, IngestionException.exceptions.generalVolumeException(unManagedVolume.getLabel(), ex.getLocalizedMessage()));
_requestContext.getVolumeContext().rollback();
}
}
try {
// update the task status
for (String unManagedVolumeGUID : _requestContext.getProcessedUnManagedVolumeMap().keySet()) {
VolumeIngestionContext volumeContext = _requestContext.getProcessedUnManagedVolumeMap().get(unManagedVolumeGUID);
UnManagedVolume unManagedVolume = volumeContext.getUnmanagedVolume();
String taskMessage = "";
String taskId = _taskMap.get(unManagedVolume.getId().toString());
boolean ingestedSuccessfully = false;
// An inactive unmanaged volume means it was fully ingested and retired.
if (unManagedVolume.getInactive()) {
ingestedSuccessfully = true;
taskMessage = INGESTION_SUCCESSFUL_MSG;
} else {
// check in the created objects for corresponding block object without any internal flags set
BlockObject createdObject = _requestContext.findCreatedBlockObject(unManagedVolumeGUID.replace(VolumeIngestionUtil.UNMANAGEDVOLUME, VolumeIngestionUtil.VOLUME));
_logger.info("checking partial ingestion status of block object " + createdObject);
if ((null != createdObject) && (!createdObject.checkInternalFlags(Flag.PARTIALLY_INGESTED) || // If this is an ingested RP volume in an uningested protection set, the ingest is successful.
(createdObject instanceof Volume && ((Volume) createdObject).checkForRp() && ((Volume) createdObject).getProtectionSet() == null)) || // If this is a successfully processed VPLEX backend volume, it will have the INTERNAL_OBJECT Flag
(VolumeIngestionUtil.isVplexBackendVolume(unManagedVolume) && createdObject.checkInternalFlags(Flag.INTERNAL_OBJECT))) {
_logger.info("successfully partially ingested block object {} ", createdObject.forDisplay());
ingestedSuccessfully = true;
taskMessage = INGESTION_SUCCESSFUL_MSG;
} else {
_logger.info("block object {} was not (partially) ingested successfully", createdObject);
ingestedSuccessfully = false;
StringBuffer taskStatus = _requestContext.getTaskStatusMap().get(unManagedVolume.getNativeGuid());
if (taskStatus == null) {
// No task status found. Put in a default message.
taskMessage = String.format("Not all the parent/replicas of unmanaged volume %s have been ingested", unManagedVolume.getLabel());
} else {
taskMessage = taskStatus.toString();
}
}
}
if (ingestedSuccessfully) {
_dbClient.ready(UnManagedVolume.class, unManagedVolume.getId(), taskId, taskMessage);
} else {
_dbClient.error(UnManagedVolume.class, unManagedVolume.getId(), taskId, IngestionException.exceptions.unmanagedVolumeIsNotVisible(unManagedVolume.getLabel(), taskMessage));
}
// Commit any ingested CG
_unManagedVolumeService.commitIngestedCG(_requestContext, unManagedVolume);
// Commit the volume's internal resources
volumeContext.commit();
// Commit this volume's updated data objects if any after ingestion
Set<DataObject> updatedObjects = _requestContext.getDataObjectsToBeUpdatedMap().get(unManagedVolumeGUID);
if (updatedObjects != null && !updatedObjects.isEmpty()) {
for (DataObject dob : updatedObjects) {
_logger.info("Ingestion Wrap Up: Updating DataObject {} (hash {})", dob.forDisplay(), dob.hashCode());
_dbClient.updateObject(dob);
}
}
// Commit this volume's created data objects if any after ingestion
Set<DataObject> createdObjects = _requestContext.getDataObjectsToBeCreatedMap().get(unManagedVolumeGUID);
if (createdObjects != null && !createdObjects.isEmpty()) {
for (DataObject dob : createdObjects) {
_logger.info("Ingestion Wrap Up: Creating DataObject {} (hash {})", dob.forDisplay(), dob.hashCode());
_dbClient.createObject(dob);
}
}
}
} catch (InternalException e) {
// Internal errors propagate unchanged so the caller sees the service error.
throw e;
} catch (Exception e) {
_logger.debug("Unexpected ingestion exception:", e);
throw APIException.internalServerErrors.genericApisvcError(ExceptionUtils.getExceptionMessage(e), e);
}
for (BlockObject bo : _requestContext.getBlockObjectsToBeCreatedMap().values()) {
_logger.info("Ingestion Wrap Up: Creating BlockObject {} (hash {})", bo.forDisplay(), bo.hashCode());
_dbClient.createObject(bo);
}
for (UnManagedVolume umv : _requestContext.getUnManagedVolumesToBeDeleted()) {
// NOTE: deletion here is a soft delete — presumably these have been marked
// inactive upstream and updateObject persists that state; confirm.
_logger.info("Ingestion Wrap Up: Deleting UnManagedVolume {} (hash {})", umv.forDisplay(), umv.hashCode());
_dbClient.updateObject(umv);
}
// record the events after they have been persisted
for (BlockObject volume : _requestContext.getBlockObjectsToBeCreatedMap().values()) {
_unManagedVolumeService.recordVolumeOperation(_dbClient, _unManagedVolumeService.getOpByBlockObjectType(volume), Status.ready, volume.getId());
}
}
Example usage of com.emc.storageos.svcs.errorhandling.resources.APIException in the CoprHD project coprhd-controller: class ConsistencyGroupService, method deleteConsistencyGroup.
/**
 * Delete consistency group
 *
 * Validates the group exists and has no snapshots, optionally force-deletes its
 * active volumes, then deactivates the group in ViPR and (if it was created on a
 * storage system) deletes it from the array.
 *
 * @param openstackTenantId openstack tenant id
 * @param consistencyGroupId consistency group id
 * @param param pojo class to bind request
 * @param isV1Call cinder V1 api
 * @param header HTTP header
 * @brief delete Consistency group
 * @return Response (202 Accepted on success, 400/404 error otherwise)
 */
@POST
@Path("/{consistencyGroup_id}/delete")
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
public Response deleteConsistencyGroup(@PathParam("tenant_id") String openstackTenantId, @PathParam("consistencyGroup_id") String consistencyGroupId, ConsistencyGroupDeleteRequest param, @HeaderParam("X-Cinder-V1-Call") String isV1Call, @Context HttpHeaders header) {
// 'force' requests deletion of the group's active volumes before the group itself
boolean isForced = param.consistencygroup.force;
final BlockConsistencyGroup consistencyGroup = findConsistencyGroup(consistencyGroupId, openstackTenantId);
if (consistencyGroup == null) {
_log.error("Not Found : No Such Consistency Group Found {}", consistencyGroupId);
return CinderApiUtils.createErrorResponse(404, "Not Found : No Such Consistency Group Found");
} else if (!consistencyGroupId.equals(CinderApiUtils.splitString(consistencyGroup.getId().toString(), ":", 3))) {
// Guard against a lookup that resolved to a different group than requested.
_log.error("Bad Request : There is no consistency group with id {} , please retry with correct consistency group id", consistencyGroupId);
return CinderApiUtils.createErrorResponse(400, "Bad Request : There is no consistency group exist, please retry with correct consistency group id");
}
String task = UUID.randomUUID().toString();
TaskResourceRep taskRep = null;
// A group with snapshots cannot be deleted, even with force.
if (getCinderHelper().verifyConsistencyGroupHasSnapshot(consistencyGroup)) {
_log.error("Bad Request : Consistency Group {} has Snapshot", consistencyGroupId);
return CinderApiUtils.createErrorResponse(400, "Bad Request : Consistency Group has Snapshot ");
}
if (isForced) {
// Force path: delete every still-active volume in the group first.
final URIQueryResultList cgVolumesResults = new URIQueryResultList();
_dbClient.queryByConstraint(getVolumesByConsistencyGroup(consistencyGroup.getId()), cgVolumesResults);
// NOTE(review): iterator() is invoked afresh on every hasNext()/next() call;
// this relies on URIQueryResultList returning the same underlying iterator
// instance — verify, since a new iterator per call would never advance.
while (cgVolumesResults.iterator().hasNext()) {
Volume volume = _dbClient.queryObject(Volume.class, cgVolumesResults.iterator().next());
if (!volume.getInactive()) {
BlockServiceApi api = BlockService.getBlockServiceImpl(volume, _dbClient);
URI systemUri = volume.getStorageController();
List<URI> volumeURIs = new ArrayList<URI>();
volumeURIs.add(volume.getId());
api.deleteVolumes(systemUri, volumeURIs, "FULL", null);
if (volume.getExtensions() == null) {
volume.setExtensions(new StringMap());
}
// Reflect Cinder's "deleting" status and soft-delete the volume record.
volume.getExtensions().put("status", CinderConstants.ComponentStatus.DELETING.getStatus().toLowerCase());
volume.setInactive(true);
_dbClient.updateObject(volume);
}
}
}
try {
// Fail if anything else (exports, snapshots, etc.) still references the group.
ArgValidator.checkReference(BlockConsistencyGroup.class, consistencyGroup.getId(), checkForDelete(consistencyGroup));
} catch (APIException e) {
_log.error("Bad Request : Consistency Group Contains active references of type : {}", e.getMessage());
return CinderApiUtils.createErrorResponse(400, "Bad Request : Consistency Group Contains active references");
}
// RP + VPlex CGs cannot be be deleted without VPlex controller intervention.
// NOTE(review): consistencyGroup.getTypes() could be null for a group with no
// types set — confirm whether getTypes() is guaranteed non-null here.
if (!consistencyGroup.getTypes().contains(Types.VPLEX.toString()) || canDeleteConsistencyGroup(consistencyGroup)) {
final URIQueryResultList cgVolumesResults = new URIQueryResultList();
_dbClient.queryByConstraint(getVolumesByConsistencyGroup(consistencyGroup.getId()), cgVolumesResults);
// Any remaining active volume blocks deletion unless --force was used.
while (cgVolumesResults.iterator().hasNext()) {
Volume volume = _dbClient.queryObject(Volume.class, cgVolumesResults.iterator().next());
if (!volume.getInactive()) {
return CinderApiUtils.createErrorResponse(400, "Bad Request : Try to delete consistency group with --force");
}
}
// Soft-delete the group in ViPR and finish the deactivate task.
consistencyGroup.setStorageController(null);
consistencyGroup.setInactive(true);
_dbClient.updateObject(consistencyGroup);
taskRep = finishDeactivateTask(consistencyGroup, task);
if (taskRep.getState().equals("ready") || taskRep.getState().equals("pending")) {
return Response.status(202).build();
}
}
final StorageSystem storageSystem = consistencyGroup.created() ? _permissionsHelper.getObjectById(consistencyGroup.getStorageController(), StorageSystem.class) : null;
// If the consistency group has been created, and the system
// is a VPlex, then we need to do VPlex related things to destroy
// the consistency groups on the system. If the consistency group
// has not been created on the system or the system is not a VPlex
// revert to the default.
BlockServiceApi blockServiceApi = BlockService.getBlockServiceImpl("group");
if (storageSystem != null) {
String systemType = storageSystem.getSystemType();
if (DiscoveredDataObject.Type.vplex.name().equals(systemType)) {
blockServiceApi = BlockService.getBlockServiceImpl(systemType);
}
_log.info(String.format("BlockConsistencyGroup %s is associated to StorageSystem %s. Going to delete it on that array.", consistencyGroup.getLabel(), storageSystem.getNativeGuid()));
// Otherwise, invoke operation to delete CG from the array.
taskRep = blockServiceApi.deleteConsistencyGroup(storageSystem, consistencyGroup, task);
if (taskRep.getState().equals("ready") || taskRep.getState().equals("pending")) {
return Response.status(202).build();
}
}
if (taskRep == null) {
// Group never reached any storage system: remove it from ViPR only.
_log.info(String.format("BlockConsistencyGroup %s was not associated with any storage. Deleting it from ViPR only.", consistencyGroup.getLabel()));
TaskResourceRep resp = finishDeactivateTask(consistencyGroup, task);
if (resp.getState().equals("ready") || resp.getState().equals("pending")) {
return Response.status(202).build();
}
}
return CinderApiUtils.createErrorResponse(400, "Bad Request");
}
Aggregations