Use of com.emc.storageos.svcs.errorhandling.resources.APIException in project coprhd-controller by CoprHD.
The class DisasterRecoveryService, method resumeStandby.
/**
* Resume data replication for a paused standby site
*
* @param uuid site UUID
* @brief Resume data replication for a paused standby site
* @return updated standby site representation
*/
@POST
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@CheckPermission(roles = { Role.SECURITY_ADMIN, Role.RESTRICTED_SECURITY_ADMIN, Role.SYSTEM_ADMIN, Role.RESTRICTED_SYSTEM_ADMIN }, blockProxies = true)
@Path("/{uuid}/resume")
public SiteRestRep resumeStandby(@PathParam("uuid") String uuid) {
log.info("Begin to resume data sync to standby site identified by uuid: {}", uuid);
Site standby = validateSiteConfig(uuid);
SiteState state = standby.getState();
if (!state.equals(SiteState.STANDBY_PAUSED) && !state.equals(SiteState.ACTIVE_DEGRADED)) {
log.error("site {} is in state {}, should be STANDBY_PAUSED or ACTIVE_DEGRADED", uuid, standby.getState());
throw APIException.badRequests.operationOnlyAllowedOnPausedSite(standby.getName(), standby.getState().toString());
}
SiteNetworkState networkState = drUtil.getSiteNetworkState(uuid);
if (networkState.getNetworkHealth() == NetworkHealth.BROKEN) {
throw APIException.internalServerErrors.siteConnectionBroken(standby.getName(), "Network health state is broken.");
}
try (InternalSiteServiceClient client = createInternalSiteServiceClient(standby)) {
commonPrecheck();
client.setCoordinatorClient(coordinator);
client.setKeyGenerator(apiSignatureGenerator);
client.resumePrecheck();
} catch (APIException e) {
throw e;
} catch (Exception e) {
throw APIException.internalServerErrors.resumeStandbyPrecheckFailed(standby.getName(), e.getMessage());
}
// Get the signature key before the transaction starts, since retrieving it may write the key to ZK.
SecretKey secretKey = apiSignatureGenerator.getSignatureKey(SignatureKeyType.INTERVDC_API);
InterProcessLock lock = drUtil.getDROperationLock();
long vdcTargetVersion = DrUtil.newVdcConfigVersion();
try {
coordinator.startTransaction();
for (Site site : drUtil.listStandbySites()) {
if (site.getUuid().equals(uuid)) {
log.error("Re-init the target standby", uuid);
// init the to-be resumed standby site
long dataRevision = vdcTargetVersion;
List<Site> standbySites = drUtil.listStandbySites();
SiteConfigParam configParam = prepareSiteConfigParam(standbySites, ipsecConfig.getPreSharedKey(), uuid, dataRevision, vdcTargetVersion, secretKey);
try (InternalSiteServiceClient internalSiteServiceClient = new InternalSiteServiceClient()) {
internalSiteServiceClient.setCoordinatorClient(coordinator);
internalSiteServiceClient.setServer(site.getVipEndPoint());
internalSiteServiceClient.initStandby(configParam);
}
site.setState(SiteState.STANDBY_RESUMING);
coordinator.persistServiceConfiguration(site.toConfiguration());
drUtil.recordDrOperationStatus(site.getUuid(), InterState.RESUMING_STANDBY);
drUtil.updateVdcTargetVersion(uuid, SiteInfo.DR_OP_CHANGE_DATA_REVISION, vdcTargetVersion, dataRevision);
} else {
drUtil.updateVdcTargetVersion(site.getUuid(), SiteInfo.DR_OP_RESUME_STANDBY, vdcTargetVersion);
}
}
// Update the local (active) site last
drUtil.updateVdcTargetVersion(coordinator.getSiteId(), SiteInfo.DR_OP_RESUME_STANDBY, vdcTargetVersion);
coordinator.commitTransaction();
auditDisasterRecoveryOps(OperationTypeEnum.RESUME_STANDBY, AuditLogManager.AUDITLOG_SUCCESS, AuditLogManager.AUDITOP_BEGIN, standby.toBriefString());
return siteMapper.map(standby);
} catch (Exception e) {
log.error("Error resuming site {}", uuid, e);
coordinator.discardTransaction();
auditDisasterRecoveryOps(OperationTypeEnum.RESUME_STANDBY, AuditLogManager.AUDITLOG_FAILURE, null, standby.toBriefString());
InternalServerErrorException resumeStandbyFailedException = APIException.internalServerErrors.resumeStandbyFailed(standby.getName(), e.getMessage());
throw resumeStandbyFailedException;
} finally {
try {
lock.release();
} catch (Exception ignore) {
log.error(String.format("Lock release failed when resuming standby site: %s", uuid));
}
}
}
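For context, a minimal client-side sketch of driving this endpoint is shown below. It uses the standard JAX-RS 2.0 client API and assumes the service is mounted under /site on the active site's API port (4443 here); the host, UUID, and the omitted authentication token are placeholders that a real CoprHD deployment would require.

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

public class ResumeStandbyExample {
    public static void main(String[] args) {
        // Placeholder values; a real call also needs an auth token header.
        String activeSiteVip = "https://active-site-vip:4443";
        String standbyUuid = "11111111-2222-3333-4444-555555555555";

        Client client = ClientBuilder.newClient();
        try {
            Response response = client
                    .target(activeSiteVip)
                    .path("/site/" + standbyUuid + "/resume")
                    .request(MediaType.APPLICATION_JSON)
                    .post(Entity.json(""));
            // A 200 response carries the updated SiteRestRep for the standby site.
            System.out.println("HTTP status: " + response.getStatus());
            System.out.println(response.readEntity(String.class));
        } finally {
            client.close();
        }
    }
}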
Use of com.emc.storageos.svcs.errorhandling.resources.APIException in project coprhd-controller by CoprHD.
The class DisasterRecoveryService, method remove.
/**
* Remove multiple standby sites. Once the removal completes successfully, data replication to those sites is stopped
*
* @param idList site uuid list to be removed
* @brief Remove a list of standby sites
* @return Response
*/
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@CheckPermission(roles = { Role.SECURITY_ADMIN, Role.RESTRICTED_SECURITY_ADMIN }, blockProxies = true)
@Path("/remove")
public Response remove(SiteIdListParam idList) {
List<String> siteIdList = idList.getIds();
String siteIdStr = StringUtils.join(siteIdList, ",");
log.info("Begin to remove standby site from local vdc by uuid: {}", siteIdStr);
List<Site> toBeRemovedSites = new ArrayList<>();
for (String siteId : siteIdList) {
Site site;
try {
site = drUtil.getSiteFromLocalVdc(siteId);
} catch (Exception ex) {
log.error("Can't load site {} from ZK", siteId);
throw APIException.badRequests.siteIdNotFound();
}
if (site.getState().equals(SiteState.ACTIVE)) {
log.error("Unable to remove this site {}. It is active", siteId);
throw APIException.badRequests.operationNotAllowedOnActiveSite();
}
if (site.getState().isDROperationOngoing() && !site.getState().equals(SiteState.STANDBY_SYNCING)) {
log.error("Unable to remove this site {} in state {}. " + "DR operation other than STANDBY_SYNCING is ongoing", siteId, site.getState().name());
throw APIException.internalServerErrors.concurrentDROperationNotAllowed(site.getName(), site.getState().toString());
}
toBeRemovedSites.add(site);
}
// Build a comma-separated string of site names for a more readable exception message
StringBuilder siteNamesSb = new StringBuilder();
for (Site site : toBeRemovedSites) {
if (siteNamesSb.length() != 0) {
siteNamesSb.append(", ");
}
siteNamesSb.append(site.getName());
}
String siteNamesStr = siteNamesSb.toString();
try {
commonPrecheck(siteIdList);
} catch (APIException e) {
throw e;
} catch (Exception e) {
throw APIException.internalServerErrors.removeStandbyPrecheckFailed(siteNamesStr, e.getMessage());
}
InterProcessLock lock = drUtil.getDROperationLock(false);
List<String> sitesString = new ArrayList<>();
try {
log.info("Removing sites");
coordinator.startTransaction();
for (Site site : toBeRemovedSites) {
site.setState(SiteState.STANDBY_REMOVING);
coordinator.persistServiceConfiguration(site.toConfiguration());
drUtil.recordDrOperationStatus(site.getUuid(), InterState.REMOVING_STANDBY);
sitesString.add(site.toBriefString());
}
log.info("Notify all sites for reconfig");
long vdcTargetVersion = DrUtil.newVdcConfigVersion();
for (Site standbySite : drUtil.listSites()) {
drUtil.updateVdcTargetVersion(standbySite.getUuid(), SiteInfo.DR_OP_REMOVE_STANDBY, vdcTargetVersion);
}
coordinator.commitTransaction();
auditDisasterRecoveryOps(OperationTypeEnum.REMOVE_STANDBY, AuditLogManager.AUDITLOG_SUCCESS, AuditLogManager.AUDITOP_BEGIN, StringUtils.join(sitesString, ','));
return Response.status(Response.Status.ACCEPTED).build();
} catch (Exception e) {
log.error("Failed to remove site {}", siteIdStr, e);
coordinator.discardTransaction();
auditDisasterRecoveryOps(OperationTypeEnum.REMOVE_STANDBY, AuditLogManager.AUDITLOG_FAILURE, null, StringUtils.join(sitesString, ','));
throw APIException.internalServerErrors.removeStandbyFailed(siteNamesStr, e.getMessage());
} finally {
try {
lock.release();
} catch (Exception ignore) {
log.error(String.format("Lock release failed when removing standby sites: %s", siteIdStr));
}
}
}
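As a side note, the name-joining loop above can be written more compactly on a Java 8+ toolchain; the following fragment is a behavior-equivalent sketch, not code taken from the project:

// Join the names of the to-be-removed sites with ", " for the exception message.
String siteNamesStr = toBeRemovedSites.stream()
        .map(Site::getName)
        .collect(java.util.stream.Collectors.joining(", "));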
Use of com.emc.storageos.svcs.errorhandling.resources.APIException in project coprhd-controller by CoprHD.
The class NotFoundExceptionTest, method idNotFoundInParam.
@Test
public void idNotFoundInParam() {
final URI id = knownId;
final APIException exception = APIException.badRequests.unableToFindEntity(id);
assertException(MESSAGE_NOT_FOUND_IN_REQUEST + id, API_PARAMETER_NOT_FOUND.getCode(), "Request parameter cannot be found", BAD_REQUEST.getStatusCode(), exception);
}
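A rough companion check, meant as an additional method in the same test class, is sketched below. It relies only on getMessage() plus the knownId field and JUnit imports already present in the test base, and it assumes getMessage() returns the same interpolated text that assertException compares against:

@Test
public void messageContainsMissingId() {
    final URI id = knownId;
    final APIException exception = APIException.badRequests.unableToFindEntity(id);
    // The factory interpolates the offending id into the human-readable message,
    // which the assertException call above also depends on.
    org.junit.Assert.assertTrue(exception.getMessage().contains(id.toString()));
}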
Use of com.emc.storageos.svcs.errorhandling.resources.APIException in project coprhd-controller by CoprHD.
The class VolumeService, method createVolume.
/**
* The fundamental abstraction in the Block Store is a
* volume. A volume is a unit of block storage capacity that has been
* allocated by a consumer to a project. This API allows the user to
* create one or more volumes. The volumes are created in the same
* storage pool.
*
* NOTE: This is an asynchronous operation.
*
* @prereq none
*
* @param param POST data containing the volume creation information.
*
* @brief Create volume
* @return Details of the newly created volume
* @throws InternalException
*/
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
public Response createVolume(@PathParam("tenant_id") String openstackTenantId, @HeaderParam("X-Cinder-V1-Call") String isV1Call, VolumeCreateRequestGen param, @Context HttpHeaders header) throws InternalException {
// Step 1: Parameter validation
Project project = getCinderHelper().getProject(openstackTenantId, getUserFromContext());
String snapshotId = param.volume.snapshot_id;
String sourceVolId = param.volume.source_volid;
String imageId = param.volume.imageRef;
String consistencygroup_id = param.volume.consistencygroup_id;
String volume_type = param.volume.volume_type;
boolean hasConsistencyGroup = false;
if (project == null) {
if (openstackTenantId != null) {
throw APIException.badRequests.projectWithTagNonexistent(openstackTenantId);
} else {
throw APIException.badRequests.parameterIsNullOrEmpty(PROJECT_TENANTID_NULL);
}
}
URI tenantUri = project.getTenantOrg().getURI();
TenantOrg tenant = _dbClient.queryObject(TenantOrg.class, tenantUri);
if (tenant == null)
throw APIException.notFound.unableToFindUserScopeOfSystem();
_log.debug("Create volume: project = {}, tenant = {}", project.getLabel(), tenant.getLabel());
if (param.volume.size <= 0) {
_log.error("volume size should not be zero or negative ={} ", param.volume.size);
return CinderApiUtils.createErrorResponse(400, "Bad Request : Invalid Volume size");
}
long requestedSize = param.volume.size * GB;
// convert volume type from name to vpool
VirtualPool vpool = getVpool(param.volume.volume_type);
Volume sourceVolume = null;
if (vpool == null) {
if (sourceVolId != null) {
sourceVolume = findVolume(sourceVolId, openstackTenantId);
if (sourceVolume == null) {
_log.error("Invalid Source Volume ID ={} ", sourceVolId);
return CinderApiUtils.createErrorResponse(404, "Not Found : Invalid Source Volume ID " + sourceVolId);
}
vpool = _dbClient.queryObject(VirtualPool.class, sourceVolume.getVirtualPool());
} else {
_log.error("Invalid Volume Type ={} ", volume_type);
return CinderApiUtils.createErrorResponse(404, "Not Found : Invalid Volume Type " + volume_type);
}
}
if (!validateVolumeCreate(openstackTenantId, null, requestedSize)) {
_log.info("The volume can not be created because of insufficient project quota.");
throw APIException.badRequests.insufficientQuotaForProject(project.getLabel(), "volume");
} else if (!validateVolumeCreate(openstackTenantId, vpool, requestedSize)) {
_log.info("The volume can not be created because of insufficient quota for virtual pool.");
throw APIException.badRequests.insufficientQuotaForVirtualPool(vpool.getLabel(), "virtual pool");
}
_log.debug("Create volume: vpool = {}", vpool.getLabel());
VirtualArray varray = getCinderHelper().getVarray(param.volume.availability_zone, getUserFromContext());
if ((snapshotId == null) && (sourceVolId == null) && (varray == null)) {
// otherwise availability_zone exception will be thrown
throw APIException.badRequests.parameterIsNotValid(param.volume.availability_zone);
}
// Validating consistency group
URI blockConsistencyGroupId = null;
BlockConsistencyGroup blockConsistencyGroup = null;
if (consistencygroup_id != null) {
_log.info("Verifying for consistency group : " + consistencygroup_id);
blockConsistencyGroup = (BlockConsistencyGroup) getCinderHelper().queryByTag(URI.create(consistencygroup_id), getUserFromContext(), BlockConsistencyGroup.class);
if (getCinderHelper().verifyConsistencyGroupHasSnapshot(blockConsistencyGroup)) {
_log.error("Bad Request : Consistency Group has Snapshot ");
return CinderApiUtils.createErrorResponse(400, "Bad Request : Consistency Group has Snapshot ");
}
blockConsistencyGroupId = blockConsistencyGroup.getId();
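// A ViPR URN has the form urn:storageos:<Type>:<UUID>:<vdc>, so index 3 of the split below is the UUID segment that is compared against the Cinder consistency group id.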
if (blockConsistencyGroup.getTag() != null && consistencygroup_id.equals(blockConsistencyGroupId.toString().split(":")[3])) {
for (ScopedLabel tag : blockConsistencyGroup.getTag()) {
if (tag.getScope().equals("volume_types")) {
if (tag.getLabel().equals(volume_type)) {
hasConsistencyGroup = true;
} else {
return CinderApiUtils.createErrorResponse(404, "Invalid volume: No consistency group exist for volume : " + param.volume.display_name);
}
}
}
} else {
return CinderApiUtils.createErrorResponse(404, "Invalid Consistency Group Id : No Such Consistency group exists");
}
}
BlockSnapshot snapshot = null;
URI snapUri = null;
if (snapshotId != null) {
snapshot = (BlockSnapshot) getCinderHelper().queryByTag(URI.create(snapshotId), getUserFromContext(), BlockSnapshot.class);
if (snapshot == null) {
_log.error("Invalid snapshot id ={} ", snapshotId);
return CinderApiUtils.createErrorResponse(404, "Not Found : Invalid snapshot id" + snapshotId);
} else {
snapUri = snapshot.getId();
URI varrayUri = snapshot.getVirtualArray();
if (varray == null) {
varray = _dbClient.queryObject(VirtualArray.class, varrayUri);
}
}
}
if (varray != null)
_log.info("Create volume: varray = {}", varray.getLabel());
String name = null;
String description = null;
_log.info("isV1Call = {}", isV1Call);
if (isV1Call != null) {
name = param.volume.display_name;
description = param.volume.display_description;
} else {
name = param.volume.name;
description = param.volume.description;
}
if (name == null) {
name = "volume-" + RandomStringUtils.random(10);
}
// Log the resolved values after assignment so they are no longer always null.
_log.info("name = {}, description = {}", name, description);
_log.info("param.volume.name = {}, param.volume.display_name = {}", param.volume.name, param.volume.display_name);
_log.info("param.volume.description = {}, param.volume.display_description = {}", param.volume.description, param.volume.display_description);
if (name == null || (name.length() <= 2))
throw APIException.badRequests.parameterIsNotValid(name);
URI projectUri = project.getId();
checkForDuplicateName(name, Volume.class, projectUri, "project", _dbClient);
// Step 2: Check if the user has rights for volume create
verifyUserIsAuthorizedForRequest(project, vpool, varray);
// Step 3: Check capacity Quotas
_log.debug(" volume name = {}, size = {} GB", name, param.volume.size);
int volumeCount = 1;
VolumeCreate volumeCreate = new VolumeCreate(name, Long.toString(requestedSize), volumeCount, vpool.getId(), varray.getId(), project.getId());
BlockServiceApi api = getBlockServiceImpl(vpool, _dbClient);
CapacityUtils.validateQuotasForProvisioning(_dbClient, vpool, project, tenant, requestedSize, "volume");
// Step 4: Call out placementManager to get the recommendation for placement.
VirtualPoolCapabilityValuesWrapper capabilities = new VirtualPoolCapabilityValuesWrapper();
capabilities.put(VirtualPoolCapabilityValuesWrapper.RESOURCE_COUNT, volumeCount);
capabilities.put(VirtualPoolCapabilityValuesWrapper.SIZE, requestedSize);
// Create a unique task id if one is not passed in the request.
String task = UUID.randomUUID().toString();
TaskList tasklist = null;
BlockFullCopyManager blkFullCpManager = new BlockFullCopyManager(_dbClient, _permissionsHelper, _auditMgr, _coordinator, _placementManager, sc, uriInfo, _request, null);
if (hasConsistencyGroup && blockConsistencyGroupId != null) {
try {
checkForConsistencyGroup(vpool, blockConsistencyGroup, project, api, varray, capabilities, blkFullCpManager);
volumeCreate.setConsistencyGroup(blockConsistencyGroupId);
} catch (APIException exp) {
return CinderApiUtils.createErrorResponse(400, "Bad Request : can't create volume for the consistency group : " + blockConsistencyGroupId);
}
}
if (sourceVolId != null) {
_log.debug("Creating New Volume from Volume : Source volume ID ={}", sourceVolId);
if (sourceVolume != null) {
Volume vol = findVolume(sourceVolId, openstackTenantId);
if (vol == null) {
_log.debug("Creating Clone Volume failed : Invalid source volume id ");
return CinderApiUtils.createErrorResponse(404, "Not Found : Invalid source volume id" + sourceVolId);
}
tasklist = volumeClone(name, project, sourceVolId, varray, volumeCount, sourceVolume, blkFullCpManager);
} else {
_log.debug("Creating Clone Volume failed : Null Source volume ");
return CinderApiUtils.createErrorResponse(404, "Not Found : Null source volume ");
}
} else if (snapshotId != null) {
_log.debug("Creating New Volume from Snapshot ID ={}", snapshotId);
tasklist = volumeFromSnapshot(name, project, snapshotId, varray, param, volumeCount, blkFullCpManager, snapUri, snapshot);
} else if ((snapshotId == null) && (sourceVolId == null)) {
_log.debug("Creating New Volume where snapshotId and sourceVolId are null");
tasklist = newVolume(volumeCreate, project, api, capabilities, varray, task, vpool, param, volumeCount, requestedSize, name);
}
if (imageId != null) {
_log.debug("Creating New Volume from imageid ={}", imageId);
// will be implemented
tasklist = volumeFromImage(name, project, varray, param, volumeCount, blkFullCpManager, imageId);
}
if (!(tasklist.getTaskList().isEmpty())) {
for (TaskResourceRep rep : tasklist.getTaskList()) {
URI volumeUri = rep.getResource().getId();
Volume vol = _dbClient.queryObject(Volume.class, volumeUri);
if (vol != null) {
StringMap extensions = vol.getExtensions();
if (extensions == null)
extensions = new StringMap();
extensions.put("display_description", (description == null) ? "" : description);
vol.setExtensions(extensions);
ScopedLabelSet tagSet = new ScopedLabelSet();
vol.setTag(tagSet);
String[] splits = volumeUri.toString().split(":");
String tagName = splits[3];
if (tagName == null || tagName.isEmpty() || tagName.length() < 2) {
throw APIException.badRequests.parameterTooShortOrEmpty("Tag", 2);
}
URI tenantOwner = vol.getTenant().getURI();
ScopedLabel tagLabel = new ScopedLabel(tenantOwner.toString(), tagName);
tagSet.add(tagLabel);
_dbClient.updateAndReindexObject(vol);
if (isV1Call != null) {
_log.debug("Inside V1 call");
return CinderApiUtils.getCinderResponse(getVolumeDetail(vol, isV1Call, openstackTenantId), header, true, CinderConstants.STATUS_OK);
} else {
return CinderApiUtils.getCinderResponse(getVolumeDetail(vol, isV1Call, openstackTenantId), header, true, CinderConstants.STATUS_ACCEPT);
}
} else {
throw APIException.badRequests.parameterIsNullOrEmpty("Volume");
}
}
}
return CinderApiUtils.getCinderResponse(new VolumeDetail(), header, true, CinderConstants.STATUS_ACCEPT);
}
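For reference, the request this method serves follows the standard OpenStack Cinder v2 create-volume format. The sketch below posts such a body with a plain JAX-RS 2.0 client; the host, port, tenant id, token, and volume_type value are placeholders, and the exact base path of the Cinder-compatible API in a given CoprHD deployment may differ.

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

public class CinderCreateVolumeExample {
    public static void main(String[] args) {
        // Placeholder endpoint, tenant id, and Keystone token.
        String endpoint = "https://coprhd-host:8776";
        String tenantId = "openstack-tenant-id";
        String authToken = "keystone-token";

        // Standard Cinder v2 body: size is in GB; name and volume_type are optional.
        String body = "{\"volume\": {"
                + "\"size\": 1,"
                + "\"name\": \"demo-volume\","
                + "\"volume_type\": \"vpool-thin\""
                + "}}";

        Client client = ClientBuilder.newClient();
        try {
            Response response = client
                    .target(endpoint)
                    .path("/v2/" + tenantId + "/volumes")
                    .request(MediaType.APPLICATION_JSON)
                    .header("X-Auth-Token", authToken)
                    .post(Entity.entity(body, MediaType.APPLICATION_JSON));
            System.out.println("HTTP status: " + response.getStatus());
            System.out.println(response.readEntity(String.class));
        } finally {
            client.close();
        }
    }
}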
Use of com.emc.storageos.svcs.errorhandling.resources.APIException in project coprhd-controller by CoprHD.
The class BlockSnapshotSessionManager, method createSnapshotSession.
@SuppressWarnings("unchecked")
public TaskList createSnapshotSession(BlockConsistencyGroup cg, SnapshotSessionCreateParam param, BlockFullCopyManager fcManager) {
Table<URI, String, List<Volume>> storageRgToVolumes = null;
if (!param.getVolumes().isEmpty()) {
// volume group snapshot session
// group volumes by backend storage system and replication group
storageRgToVolumes = BlockServiceUtils.getReplicationGroupVolumes(param.getVolumes(), cg.getId(), _dbClient, _uriInfo);
} else {
// CG snapshot session
storageRgToVolumes = BlockServiceUtils.getReplicationGroupVolumes(BlockConsistencyGroupUtils.getAllCGVolumes(cg, _dbClient), _dbClient);
}
TaskList taskList = new TaskList();
for (Cell<URI, String, List<Volume>> cell : storageRgToVolumes.cellSet()) {
String rgName = cell.getColumnKey();
List<Volume> volumeList = cell.getValue();
if (volumeList == null || volumeList.isEmpty()) {
s_logger.warn(String.format("No volume in replication group %s", rgName));
continue;
}
s_logger.info("Processing Replication Group {}, Volumes {}", rgName, Joiner.on(',').join(transform(volumeList, fctnDataObjectToID())));
try {
taskList.getTaskList().addAll(createSnapshotSession(((List<BlockObject>) (List<?>) volumeList), param, fcManager).getTaskList());
} catch (InternalException | APIException e) {
s_logger.error("Exception when creating snapshot session for replication group {}", rgName, e);
TaskResourceRep task = BlockServiceUtils.createFailedTaskOnCG(_dbClient, cg, ResourceOperationTypeEnum.CREATE_CONSISTENCY_GROUP_SNAPSHOT_SESSION, e);
taskList.addTask(task);
} catch (Exception ex) {
s_logger.error("Unexpected Exception occurred when creating snapshot session for replication group {}", rgName, ex);
}
}
return taskList;
}
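The method above iterates a Guava Table keyed by backend storage system (row) and replication group name (column). The self-contained sketch below, which uses made-up URN strings and plain labels in place of Volume objects, shows how such a table is built and how the cellSet() iteration visits one (system, group) pair at a time:

import java.net.URI;
import java.util.Arrays;
import java.util.List;

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

public class ReplicationGroupTableExample {
    public static void main(String[] args) {
        // Row key: storage system URI; column key: replication group name; value: its volumes.
        Table<URI, String, List<String>> storageRgToVolumes = HashBasedTable.create();

        URI array1 = URI.create("urn:storageos:StorageSystem:array-1:vdc1");
        storageRgToVolumes.put(array1, "rg-gold", Arrays.asList("vol-1", "vol-2"));
        storageRgToVolumes.put(array1, "rg-silver", Arrays.asList("vol-3"));

        // Mirrors the cellSet() loop in createSnapshotSession: one unit of work per cell.
        for (Table.Cell<URI, String, List<String>> cell : storageRgToVolumes.cellSet()) {
            System.out.printf("system=%s group=%s volumes=%s%n",
                    cell.getRowKey(), cell.getColumnKey(), cell.getValue());
        }
    }
}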