Use of com.emc.storageos.db.client.model.Project in project coprhd-controller by CoprHD.
From the class BlockVolumeIngestOrchestrator, the method ingestBlockObjects:
@Override
protected <T extends BlockObject> T ingestBlockObjects(IngestionRequestContext requestContext, Class<T> clazz) throws IngestionException {
    UnManagedVolume unManagedVolume = requestContext.getCurrentUnmanagedVolume();
    boolean unManagedVolumeExported = requestContext.getVolumeContext().isVolumeExported();
    Volume volume = null;
    List<BlockSnapshotSession> snapSessions = new ArrayList<BlockSnapshotSession>();
    URI unManagedVolumeUri = unManagedVolume.getId();
    String volumeNativeGuid = unManagedVolume.getNativeGuid().replace(VolumeIngestionUtil.UNMANAGEDVOLUME, VolumeIngestionUtil.VOLUME);
    volume = VolumeIngestionUtil.checkIfVolumeExistsInDB(volumeNativeGuid, _dbClient);
    // Check if the ingested volume has export masks pending ingestion.
    if (isExportIngestionPending(volume, unManagedVolumeUri, unManagedVolumeExported)) {
        return clazz.cast(volume);
    }
    if (null == volume) {
        validateUnManagedVolume(unManagedVolume, requestContext.getVpool(unManagedVolume));
        // @TODO Need to revisit this. In the 8.x Provider, a ReplicationGroup is automatically created
        // when a volume is associated with a StorageGroup.
        // checkUnManagedVolumeAddedToCG(unManagedVolume, virtualArray, tenant, project, vPool);
        checkVolumeExportState(unManagedVolume, unManagedVolumeExported);
        checkVPoolValidForExportInitiatorProtocols(requestContext.getVpool(unManagedVolume), unManagedVolume);
        checkHostIOLimits(requestContext.getVpool(unManagedVolume), unManagedVolume, unManagedVolumeExported);
        StoragePool pool = validateAndReturnStoragePoolInVAarray(unManagedVolume, requestContext.getVarray(unManagedVolume));
        // Validate whether resource limits are exceeded for the storage system and pool.
        checkSystemResourceLimitsExceeded(requestContext.getStorageSystem(), unManagedVolume, requestContext.getExhaustedStorageSystems());
        checkPoolResourceLimitsExceeded(requestContext.getStorageSystem(), pool, unManagedVolume, requestContext.getExhaustedPools());
        String autoTierPolicyId = getAutoTierPolicy(unManagedVolume, requestContext.getStorageSystem(), requestContext.getVpool(unManagedVolume));
        validateAutoTierPolicy(autoTierPolicyId, unManagedVolume, requestContext.getVpool(unManagedVolume));
        volume = createVolume(requestContext, volumeNativeGuid, pool, unManagedVolume, autoTierPolicyId);
    }
    if (volume != null) {
        String syncActive = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.IS_SYNC_ACTIVE.toString(), unManagedVolume.getVolumeInformation());
        boolean isSyncActive = (null != syncActive) ? Boolean.parseBoolean(syncActive) : false;
        volume.setSyncActive(isSyncActive);
        if (VolumeIngestionUtil.isFullCopy(unManagedVolume)) {
            _logger.info("Setting clone related properties {}", unManagedVolume.getId());
            String replicaState = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.REPLICA_STATE.toString(), unManagedVolume.getVolumeInformation());
            volume.setReplicaState(replicaState);
            String replicationGroupName = PropertySetterUtil.extractValueFromStringSet(SupportedVolumeInformation.FULL_COPY_CONSISTENCY_GROUP_NAME.toString(), unManagedVolume.getVolumeInformation());
            if (replicationGroupName != null && !replicationGroupName.isEmpty()) {
                volume.setReplicationGroupInstance(replicationGroupName);
            }
        }
        // Create snapshot sessions for each synchronization aspect of the volume.
        StringSet syncAspectInfoForVolume = PropertySetterUtil.extractValuesFromStringSet(SupportedVolumeInformation.SNAPSHOT_SESSIONS.toString(), unManagedVolume.getVolumeInformation());
        if ((syncAspectInfoForVolume != null) && (!syncAspectInfoForVolume.isEmpty())) {
            Project project = requestContext.getProject();
            // If this is a VPLEX backend volume, the front-end project should be used as the snapshot session's project.
            if (requestContext instanceof VplexVolumeIngestionContext && VolumeIngestionUtil.isVplexBackendVolume(unManagedVolume)) {
                project = ((VplexVolumeIngestionContext) requestContext).getFrontendProject();
            }
            for (String syncAspectInfo : syncAspectInfoForVolume) {
                String[] syncAspectInfoComponents = syncAspectInfo.split(":");
                String syncAspectName = syncAspectInfoComponents[0];
                String syncAspectObjPath = syncAspectInfoComponents[1];
                // Make sure the session has not already been created.
                URIQueryResultList queryResults = new URIQueryResultList();
                _dbClient.queryByConstraint(AlternateIdConstraint.Factory.getBlockSnapshotSessionBySessionInstance(syncAspectObjPath), queryResults);
                Iterator<URI> queryResultsIter = queryResults.iterator();
                if (!queryResultsIter.hasNext()) {
                    BlockSnapshotSession session = new BlockSnapshotSession();
                    session.setId(URIUtil.createId(BlockSnapshotSession.class));
                    session.setLabel(syncAspectName);
                    session.setSessionLabel(syncAspectName);
                    session.setParent(new NamedURI(volume.getId(), volume.getLabel()));
                    session.setProject(new NamedURI(project.getId(), project.getLabel()));
                    session.setStorageController(volume.getStorageController());
                    session.setSessionInstance(syncAspectObjPath);
                    StringSet linkedTargetURIs = new StringSet();
                    URIQueryResultList snapshotQueryResults = new URIQueryResultList();
                    _dbClient.queryByConstraint(AlternateIdConstraint.Factory.getBlockSnapshotBySettingsInstance(syncAspectObjPath), snapshotQueryResults);
                    Iterator<URI> snapshotQueryResultsIter = snapshotQueryResults.iterator();
                    while (snapshotQueryResultsIter.hasNext()) {
                        linkedTargetURIs.add(snapshotQueryResultsIter.next().toString());
                    }
                    session.setLinkedTargets(linkedTargetURIs);
                    session.setOpStatus(new OpStatusMap());
                    snapSessions.add(session);
                }
            }
            if (!snapSessions.isEmpty()) {
                _dbClient.createObject(snapSessions);
            }
        }
    }
    // Note that a VPLEX backend volume can also be a snapshot target volume.
    // When the VPLEX ingest orchestrator is executed, it gets the ingestion
    // strategy for the backend volume and executes it. If the backend volume
    // is both a snapshot and a VPLEX backend volume, this local volume ingest
    // strategy is invoked and a Volume instance will result. That is fine because
    // we need to represent that VPLEX backend volume. However, we also need a
    // BlockSnapshot instance to represent the snapshot target volume. Therefore,
    // if the unmanaged volume is also a snapshot target volume, we get and
    // execute the local snapshot ingest strategy to create this BlockSnapshot
    // instance and we add it to the created object list. Note that since the
    // BlockSnapshot is added to the created objects list and the Volume and
    // BlockSnapshot instance will have the same native GUID, we must be careful
    // about adding the Volume to the created object list in the VPLEX ingestion
    // strategy.
    BlockObject snapshot = null;
    if (VolumeIngestionUtil.isSnapshot(unManagedVolume)) {
        String strategyKey = ReplicationStrategy.LOCAL.name() + "_" + VolumeType.SNAPSHOT.name();
        IngestStrategy ingestStrategy = ingestStrategyFactory.getIngestStrategy(IngestStrategyEnum.getIngestStrategy(strategyKey));
        snapshot = ingestStrategy.ingestBlockObjects(requestContext, BlockSnapshot.class);
        requestContext.getBlockObjectsToBeCreatedMap().put(snapshot.getNativeGuid(), snapshot);
    }
    // Always run this when the volume has NO_PUBLIC_ACCESS.
    if (markUnManagedVolumeInactive(requestContext, volume)) {
        _logger.info("All the related replicas and the parent of unManagedVolume {} have been ingested", unManagedVolume.getNativeGuid());
        // RP masks.
        if (!unManagedVolumeExported && !VolumeIngestionUtil.checkUnManagedResourceIsRecoverPointEnabled(unManagedVolume)) {
            unManagedVolume.setInactive(true);
            requestContext.getUnManagedVolumesToBeDeleted().add(unManagedVolume);
        }
    } else if (volume != null) {
        _logger.info("Not all of the parent/replicas of unManagedVolume {} have been ingested, hence marking it as internal", unManagedVolume.getNativeGuid());
        volume.addInternalFlags(INTERNAL_VOLUME_FLAGS);
        for (BlockSnapshotSession snapSession : snapSessions) {
            snapSession.addInternalFlags(INTERNAL_VOLUME_FLAGS);
        }
        _dbClient.updateObject(snapSessions);
    }
    return clazz.cast(volume);
}
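The snapshot-session loop above relies on each SNAPSHOT_SESSIONS entry packing the session name and the array-side synchronization-aspect object path into a single colon-separated string. The following standalone sketch shows just that parsing step; the entry value and the class name are hypothetical illustrations inferred from the split(":") call, not part of CoprHD.

import java.util.AbstractMap.SimpleEntry;
import java.util.Map;

// Minimal, plain-Java sketch of the "name:objPath" parsing assumed by the
// orchestrator's syncAspectInfo.split(":") call. The sample value below is
// hypothetical; real entries come from UnManagedVolume.getVolumeInformation().
public class SnapSessionEntryExample {

    static Map.Entry<String, String> parseSyncAspectEntry(String syncAspectInfo) {
        String[] components = syncAspectInfo.split(":");
        String sessionName = components[0];    // becomes the session label
        String sessionObjPath = components[1]; // becomes the session instance / alternate-id key
        return new SimpleEntry<String, String>(sessionName, sessionObjPath);
    }

    public static void main(String[] args) {
        // Hypothetical entry; the exact encoding on a real array may differ.
        Map.Entry<String, String> parsed = parseSyncAspectEntry("dailySnapSession:SYMMETRIX+000196701234+ASPECT+1234");
        System.out.println(parsed.getKey() + " -> " + parsed.getValue());
    }
}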
Use of com.emc.storageos.db.client.model.Project in project coprhd-controller by CoprHD.
From the class BlockVplexVolumeIngestOrchestrator, the method ingestBlockObjects:
@Override
public <T extends BlockObject> T ingestBlockObjects(IngestionRequestContext requestContext, Class<T> clazz) throws IngestionException {
    refreshCaches(requestContext.getStorageSystem());
    UnManagedVolume unManagedVolume = requestContext.getCurrentUnmanagedVolume();
    VolumeIngestionUtil.checkValidVarrayForUnmanagedVolume(unManagedVolume, requestContext.getVarray(unManagedVolume).getId(), getClusterIdToNameMap(requestContext.getStorageSystem()), getVarrayToClusterIdMap(requestContext.getStorageSystem()), _dbClient);
    String vplexIngestionMethod = requestContext.getVplexIngestionMethod();
    _logger.info("VPLEX ingestion method is " + vplexIngestionMethod);
    boolean ingestBackend = (null == vplexIngestionMethod) || vplexIngestionMethod.isEmpty() || (!vplexIngestionMethod.equals(VplexBackendIngestionContext.INGESTION_METHOD_VVOL_ONLY));
    VplexVolumeIngestionContext volumeContext = null;
    boolean isRpVplexContext = requestContext.getVolumeContext() instanceof RpVplexVolumeIngestionContext;
    if (isRpVplexContext) {
        // If this volume is RP/VPLEX, we need to get the volume context
        // from the RpVplexVolumeIngestionContext.
        volumeContext = ((RpVplexVolumeIngestionContext) requestContext.getVolumeContext()).getVplexVolumeIngestionContext();
    } else {
        // This is just a plain VPLEX volume backend ingestion.
        volumeContext = (VplexVolumeIngestionContext) requestContext.getVolumeContext();
    }
    // Set the name of the cluster to which this virtual volume ingestion request's varray is connected.
    String clusterName = getClusterNameForVarray(requestContext.getVarray(unManagedVolume), requestContext.getStorageSystem());
    volumeContext.setVirtualVolumeVplexClusterName(clusterName);
    // Determine whether the backend has already been ingested. This could be the case if the volume is
    // exported via multiple varrays or hosts and needs to be ingested for export multiple times.
    String volumeNativeGuid = unManagedVolume.getNativeGuid().replace(VolumeIngestionUtil.UNMANAGEDVOLUME, VolumeIngestionUtil.VOLUME);
    Volume volume = VolumeIngestionUtil.checkIfVolumeExistsInDB(volumeNativeGuid, _dbClient);
    boolean backendAlreadyIngested = volume != null && volume.getAssociatedVolumes() != null && !volume.getAssociatedVolumes().isEmpty();
    if (backendAlreadyIngested) {
        _logger.info("backend volumes have already been ingested for UnManagedVolume {}", unManagedVolume.forDisplay());
    } else if (ingestBackend) {
        volumeContext.setIngestionInProgress(true);
        //
        // If the "Only During Discovery" system setting is set, no new data will
        // be fetched during ingestion. This assumes that all data has been collected
        // during discovery, and ingestion will fail if it can't find all the required data.
        //
        // If "Only During Ingestion" or "During Discovery and Ingestion" mode is set,
        // then an attempt will be made to query the VPLEX API now to find any incomplete data,
        // but the database will be checked first.
        //
        // The default mode is "Only During Discovery", so the user needs to remember
        // to run discovery first on all backend storage arrays before running it on the VPLEX.
        //
        _discoveryMode = ControllerUtils.getPropertyValueFromCoordinator(_coordinator, VplexBackendIngestionContext.DISCOVERY_MODE);
        if (VplexBackendIngestionContext.DISCOVERY_MODE_DISCOVERY_ONLY.equals(_discoveryMode) || VplexBackendIngestionContext.DISCOVERY_MODE_DB_ONLY.equals(_discoveryMode)) {
            volumeContext.setInDiscoveryOnlyMode(true);
        }
        // The backend volumes and export masks will be part of the VPLEX project
        // rather than the front-end virtual volume project, so we need to set that in the context.
        Project vplexProject = VPlexBlockServiceApiImpl.getVplexProject(requestContext.getStorageSystem(), _dbClient, _tenantsService);
        volumeContext.setBackendProject(vplexProject);
        volumeContext.setFrontendProject(requestContext.getProject());
        try {
            _logger.info("Ingesting backend structure of VPLEX virtual volume {}", unManagedVolume.getLabel());
            validateContext(requestContext.getVpool(unManagedVolume), requestContext.getTenant(), volumeContext);
            ingestBackendVolumes(requestContext, volumeContext);
            ingestBackendExportMasks(requestContext, volumeContext);
            _logger.info("Backend ingestion ended: " + volumeContext.toStringDebug());
        } catch (Exception ex) {
            _logger.error("error during VPLEX backend ingestion: ", ex);
            throw IngestionException.exceptions.failedToIngestVplexBackend(ex.getLocalizedMessage());
        }
    }
    _logger.info("Ingesting VPLEX virtual volume {}", unManagedVolume.getLabel());
    T virtualVolume = super.ingestBlockObjects(requestContext, clazz);
    return virtualVolume;
}
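The discovery-mode handling above only toggles a flag on the volume context. As a rough, self-contained illustration of that decision, the logic reduces to the sketch below; the constant values are hypothetical placeholders, not the actual strings defined on VplexBackendIngestionContext.

// Plain-Java sketch of the discovery-mode check that decides whether VPLEX
// backend ingestion may query the VPLEX API for missing data. The constant
// values are hypothetical placeholders; in CoprHD they are defined on
// VplexBackendIngestionContext and read from the coordinator service.
public class VplexDiscoveryModeExample {

    // Hypothetical stand-ins for the relevant mode constants.
    static final String DISCOVERY_MODE_DISCOVERY_ONLY = "Only During Discovery";
    static final String DISCOVERY_MODE_DB_ONLY = "Only From Database";

    // True means: rely solely on previously discovered data and never call the VPLEX API during ingestion.
    static boolean isInDiscoveryOnlyMode(String discoveryMode) {
        return DISCOVERY_MODE_DISCOVERY_ONLY.equals(discoveryMode)
                || DISCOVERY_MODE_DB_ONLY.equals(discoveryMode);
    }

    public static void main(String[] args) {
        System.out.println(isInDiscoveryOnlyMode("Only During Discovery")); // true
        System.out.println(isInDiscoveryOnlyMode("Only During Ingestion")); // false
    }
}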
Use of com.emc.storageos.db.client.model.Project in project coprhd-controller by CoprHD.
From the class ConsistencyGroupService, the method createConsistencyGroup:
/**
 * Create a consistency group.
 *
 * @param openstackTenantId OpenStack tenant id
 * @param param POJO bound from the request body
 * @param isV1Call set when the request comes through the Cinder V1 API
 * @param header HTTP headers
 * @brief Create Consistency Group
 * @return Response
 */
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
public Response createConsistencyGroup(@PathParam("tenant_id") String openstackTenantId, ConsistencyGroupCreateRequest param, @HeaderParam("X-Cinder-V1-Call") String isV1Call, @Context HttpHeaders header) {
    _log.info("Creating Consistency Group : " + param.consistencygroup.name);
    ConsistencyGroupCreateResponse cgResponse = new ConsistencyGroupCreateResponse();
    final Project project = getCinderHelper().getProject(openstackTenantId, getUserFromContext());
    final String volumeTypes = param.consistencygroup.volume_types;
    VirtualPool vPool = getCinderHelper().getVpool(volumeTypes);
    if (null != project && vPool != null) {
        if (!vPool.getMultivolumeConsistency()) {
            _log.error("Bad Request : Multi volume consistency is not enabled in the volume type {}", volumeTypes);
            return CinderApiUtils.createErrorResponse(400, "Bad Request : Multi volume consistency is not enabled");
        }
        // Validate name
        ArgValidator.checkFieldNotEmpty(param.consistencygroup.name, "name");
        checkForDuplicateName(param.consistencygroup.name, BlockConsistencyGroup.class);
        // Validate name not greater than 64 characters
        ArgValidator.checkFieldLengthMaximum(param.consistencygroup.name, CG_MAX_LIMIT, "name");
        // Create Consistency Group in db
        final BlockConsistencyGroup consistencyGroup = new BlockConsistencyGroup();
        consistencyGroup.setId(URIUtil.createId(BlockConsistencyGroup.class));
        consistencyGroup.setLabel(param.consistencygroup.name);
        consistencyGroup.setProject(new NamedURI(project.getId(), project.getLabel()));
        consistencyGroup.setTenant(project.getTenantOrg());
        consistencyGroup.setCreationTime(Calendar.getInstance());
        ScopedLabelSet tagSet = new ScopedLabelSet();
        consistencyGroup.setTag(tagSet);
        tagSet.add(new ScopedLabel("volume_types", volumeTypes));
        tagSet.add(new ScopedLabel("status", "available"));
        tagSet.add(new ScopedLabel("availability_zone", (param.consistencygroup.availability_zone != null) ? param.consistencygroup.availability_zone : "nova"));
        tagSet.add(new ScopedLabel("description", (param.consistencygroup.description != null) ? param.consistencygroup.description : "No Description"));
        tagSet.add(new ScopedLabel(project.getTenantOrg().getURI().toString(), CinderApiUtils.splitString(consistencyGroup.getId().toString(), ":", 3)));
        _dbClient.createObject(consistencyGroup);
        cgResponse.id = CinderApiUtils.splitString(consistencyGroup.getId().toString(), ":", 3);
        cgResponse.name = consistencyGroup.getLabel();
        return CinderApiUtils.getCinderResponse(cgResponse, header, true, CinderConstants.STATUS_OK);
    } else {
        return CinderApiUtils.createErrorResponse(400, "Bad Request : can't create consistency group due to invalid argument");
    }
}
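Both the scoped tag and the id returned to Cinder are derived from the ViPR URN of the new BlockConsistencyGroup via CinderApiUtils.splitString(..., ":", 3). The sketch below shows the assumed behavior of that call on a typical urn:storageos:<Type>:<UUID>:<vdc> identifier; the sample URN and the helper name are illustrative, not the CoprHD implementation.

// Plain-Java sketch of how an OpenStack-visible id is assumed to be derived
// from a ViPR URN. splitString(s, ":", 3) is taken to mean "split on ':' and
// return the segment at index 3", i.e. the UUID portion of
// urn:storageos:<Type>:<UUID>:<vdc>. The sample URN is hypothetical.
public class CinderIdExample {

    static String toCinderId(String viprUrn) {
        String[] segments = viprUrn.split(":");
        return segments.length > 3 ? segments[3] : viprUrn;
    }

    public static void main(String[] args) {
        String urn = "urn:storageos:BlockConsistencyGroup:0f3b5c1a-7d2e-4f7b-9a1c-2e8d4b6a9c10:vdc1";
        System.out.println(toCinderId(urn)); // prints only the UUID segment
    }
}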
Use of com.emc.storageos.db.client.model.Project in project coprhd-controller by CoprHD.
From the class ConsistencyGroupService, the method getCosistencyGroup:
/**
 * This function handles a GET request for the details of a consistency group.
 *
 * @param openstackTenantId OpenStack tenant id
 * @param consistencyGroupId Consistency group id
 * @param isV1Call set when the request comes through the Cinder V1 API
 * @param header HTTP headers
 * @brief Get Consistency Group Info
 * @return Response
 */
@GET
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/{consistencyGroup_id}")
@CheckPermission(roles = { Role.SYSTEM_MONITOR, Role.TENANT_ADMIN }, acls = { ACL.ANY })
public Response getCosistencyGroup(@PathParam("tenant_id") String openstackTenantId, @PathParam("consistencyGroup_id") String consistencyGroupId, @HeaderParam("X-Cinder-V1-Call") String isV1Call, @Context HttpHeaders header) {
    Project project = getCinderHelper().getProject(openstackTenantId, getUserFromContext());
    if (project == null) {
        String message = "Bad Request: Project with the OpenStack Tenant Id : " + openstackTenantId + " does not exist";
        _log.error(message);
        return CinderApiUtils.createErrorResponse(400, message);
    }
    final BlockConsistencyGroup blockConsistencyGroup = findConsistencyGroup(consistencyGroupId, openstackTenantId);
    if (blockConsistencyGroup == null) {
        return CinderApiUtils.createErrorResponse(404, "Invalid Request: No Such Consistency Group Found");
    } else if (!consistencyGroupId.equals(CinderApiUtils.splitString(blockConsistencyGroup.getId().toString(), ":", 3))) {
        _log.error("Bad Request : There is no consistency group with id {}, please retry with the correct consistency group id", consistencyGroupId);
        return CinderApiUtils.createErrorResponse(400, "Bad Request : No such consistency group exists, please retry with the correct consistency group id");
    } else {
        ConsistencyGroupDetail response = getConsistencyGroupDetail(blockConsistencyGroup);
        return CinderApiUtils.getCinderResponse(response, header, true, CinderConstants.STATUS_OK);
    }
}
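For readers unfamiliar with the surrounding service classes, the endpoint above follows the standard JAX-RS pattern used throughout the CoprHD Cinder API module: the tenant id comes from the enclosing resource path, the consistency group id from the method's @Path template, and the Cinder V1 marker from a request header. A stripped-down, compilable skeleton of the same mapping follows; the class name, path prefix, and body are illustrative only.

import javax.ws.rs.GET;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

// Illustrative JAX-RS skeleton mirroring the getCosistencyGroup mapping above.
// The class-level @Path and the echo-style body are hypothetical.
@Path("/v2/{tenant_id}/consistencygroups")
public class ConsistencyGroupResourceSketch {

    @GET
    @Path("/{consistencyGroup_id}")
    @Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
    public Response getConsistencyGroup(@PathParam("tenant_id") String openstackTenantId,
            @PathParam("consistencyGroup_id") String consistencyGroupId,
            @HeaderParam("X-Cinder-V1-Call") String isV1Call,
            @Context HttpHeaders header) {
        // Real code would look up the project and group here; this sketch just echoes the ids.
        return Response.ok("tenant=" + openstackTenantId + ", cg=" + consistencyGroupId).build();
    }
}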
Use of com.emc.storageos.db.client.model.Project in project coprhd-controller by CoprHD.
From the class ConsistencyGroupSnapshotService, the method createConsistencyGroupSnapshot:
/**
 * Create a consistency group snapshot.
 *
 * @param openstackTenantId OpenStack tenant id
 * @param param POJO bound from the request body
 * @param isV1Call set when the request comes through the Cinder V1 API
 * @param header HTTP headers
 * @brief Create Consistency Group Snapshot
 * @return Response
 */
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
public Response createConsistencyGroupSnapshot(@PathParam("tenant_id") String openstackTenantId, final ConsistencyGroupSnapshotCreateRequest param, @HeaderParam("X-Cinder-V1-Call") String isV1Call, @Context HttpHeaders header) {
    // Query the consistency group.
    final String consistencyGroupId = param.cgsnapshot.consistencygroup_id;
    final BlockConsistencyGroup consistencyGroup = findConsistencyGroup(consistencyGroupId, openstackTenantId);
    if (consistencyGroup == null) {
        _log.error("Not Found : No Such Consistency Group Found {}", consistencyGroupId);
        return CinderApiUtils.createErrorResponse(404, "Not Found : No Such Consistency Group Found");
    } else if (!consistencyGroupId.equals(CinderApiUtils.splitString(consistencyGroup.getId().toString(), ":", 3))) {
        _log.error("Bad Request : Invalid Consistency Group Id {} : Please enter a valid or full Id", consistencyGroupId);
        return CinderApiUtils.createErrorResponse(400, "Bad Request : No such consistency group id exists, please enter a valid or full Id");
    }
    if (!isSnapshotCreationpermissible(consistencyGroup)) {
        _log.error("Bad Request : the virtual pool is not configured for snapshot creation");
        return CinderApiUtils.createErrorResponse(400, "Bad Request : the virtual pool is not configured for snapshot creation");
    }
    // Verify the consistency group has been created on all of its defined system types.
    if (!consistencyGroup.created()) {
        return CinderApiUtils.createErrorResponse(400, "No such consistency group has been created");
    }
    Project project = getCinderHelper().getProject(openstackTenantId, getUserFromContext());
    URI cgStorageControllerURI = consistencyGroup.getStorageController();
    if (!NullColumnValueGetter.isNullURI(cgStorageControllerURI)) {
        // No snapshots for VPLEX consistency groups.
        StorageSystem cgStorageController = _dbClient.queryObject(StorageSystem.class, cgStorageControllerURI);
        if ((DiscoveredDataObject.Type.vplex.name().equals(cgStorageController.getSystemType())) && (!consistencyGroup.checkForType(Types.LOCAL))) {
            return CinderApiUtils.createErrorResponse(400, "can't create snapshot for VPLEX");
        }
    }
    // Get the block service implementation.
    BlockServiceApi blockServiceApiImpl = getBlockServiceImpl(consistencyGroup);
    // Get the volumes in the consistency group.
    List<Volume> volumeList = blockServiceApiImpl.getActiveCGVolumes(consistencyGroup);
    _log.info("Active CG volume list : " + volumeList);
    // Generate the task id.
    String taskId = UUID.randomUUID().toString();
    // Set the snapshot type.
    String snapshotType = BlockSnapshot.TechnologyType.NATIVE.toString();
    if (consistencyGroup.checkForType(BlockConsistencyGroup.Types.RP)) {
        snapshotType = BlockSnapshot.TechnologyType.RP.toString();
    } else if ((!volumeList.isEmpty()) && (volumeList.get(0).checkForSRDF())) {
        snapshotType = BlockSnapshot.TechnologyType.SRDF.toString();
    }
    // Determine the snapshot volume for RP.
    Volume snapVolume = null;
    if (consistencyGroup.checkForType(BlockConsistencyGroup.Types.RP)) {
        for (Volume volumeToSnap : volumeList) {
            // Get the RP source volume.
            if (volumeToSnap.getPersonality() != null && volumeToSnap.getPersonality().equals(Volume.PersonalityTypes.SOURCE.toString())) {
                snapVolume = volumeToSnap;
                break;
            }
        }
    } else if (!volumeList.isEmpty()) {
        // Any volume.
        snapVolume = volumeList.get(0);
    }
    // Set the create-inactive and read-only flags.
    Boolean createInactive = Boolean.FALSE;
    Boolean readOnly = Boolean.FALSE;
    // Validate the snapshot request.
    String snapshotName = param.cgsnapshot.name;
    blockServiceApiImpl.validateCreateSnapshot(snapVolume, volumeList, snapshotType, snapshotName, readOnly, getFullCopyManager());
    // Prepare and create the snapshots for the group.
    List<URI> snapIdList = new ArrayList<URI>();
    List<BlockSnapshot> snapshotList = new ArrayList<BlockSnapshot>();
    TaskList response = new TaskList();
    snapshotList.addAll(blockServiceApiImpl.prepareSnapshots(volumeList, snapshotType, snapshotName, snapIdList, taskId));
    for (BlockSnapshot snapshot : snapshotList) {
        response.getTaskList().add(toTask(snapshot, taskId));
    }
    blockServiceApiImpl.createSnapshot(snapVolume, snapIdList, snapshotType, createInactive, readOnly, taskId);
    auditBlockConsistencyGroup(OperationTypeEnum.CREATE_CONSISTENCY_GROUP_SNAPSHOT, AuditLogManager.AUDITLOG_SUCCESS, AuditLogManager.AUDITOP_BEGIN, param.cgsnapshot.name, consistencyGroup.getId().toString());
    ConsistencyGroupSnapshotCreateResponse cgSnapshotCreateRes = new ConsistencyGroupSnapshotCreateResponse();
    for (TaskResourceRep rep : response.getTaskList()) {
        URI snapshotUri = rep.getResource().getId();
        BlockSnapshot snap = _dbClient.queryObject(BlockSnapshot.class, snapshotUri);
        // Check for null before dereferencing the snapshot returned by the query.
        if (snap != null) {
            snap.setId(snapshotUri);
            snap.setConsistencyGroup(consistencyGroup.getId());
            snap.setLabel(snapshotName);
            StringMap extensions = snap.getExtensions();
            if (extensions == null) {
                extensions = new StringMap();
            }
            extensions.put("status", CinderConstants.ComponentStatus.CREATING.getStatus().toLowerCase());
            extensions.put("taskid", rep.getId().toString());
            snap.setExtensions(extensions);
            ScopedLabelSet tagSet = new ScopedLabelSet();
            snap.setTag(tagSet);
            tagSet.add(new ScopedLabel(project.getTenantOrg().getURI().toString(), CinderApiUtils.splitString(snapshotUri.toString(), ":", 3)));
            _dbClient.updateObject(snap);
        }
        cgSnapshotCreateRes.id = CinderApiUtils.splitString(snapshotUri.toString(), ":", 3);
        cgSnapshotCreateRes.name = param.cgsnapshot.name;
    }
    return CinderApiUtils.getCinderResponse(cgSnapshotCreateRes, header, true, CinderConstants.STATUS_OK);
}
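After the tasks are created, each BlockSnapshot gets Cinder-specific bookkeeping: a status/taskid "extensions" map and a scoped tag pairing the tenant URN with the short Cinder id. The plain-Java sketch below mirrors that bookkeeping with standard collections standing in for CoprHD's StringMap and ScopedLabelSet; all values shown are hypothetical.

import java.util.HashMap;
import java.util.Map;

// Plain-Java sketch of the per-snapshot bookkeeping in the loop above.
// HashMap stands in for CoprHD's StringMap, and a (tenant URN -> short id)
// map entry stands in for the ScopedLabel tag. Values are hypothetical.
public class SnapshotBookkeepingExample {

    public static void main(String[] args) {
        String snapshotUrn = "urn:storageos:BlockSnapshot:3b9d2a44-16c5-4f0e-8a77-5d1c9e0b2f61:vdc1";
        String tenantUrn = "urn:storageos:TenantOrg:7a55f1c2-90ab-4f3d-bb1e-0c4d8e2f6a90:global";
        String taskId = "hypothetical-task-id";

        // Extensions recorded on the snapshot so Cinder can poll its status later.
        Map<String, String> extensions = new HashMap<String, String>();
        extensions.put("status", "creating");
        extensions.put("taskid", taskId);

        // Scoped tag: the scope is the tenant URN, the label is the Cinder-visible short id (the UUID segment).
        String cinderId = snapshotUrn.split(":")[3];
        Map<String, String> tag = new HashMap<String, String>();
        tag.put(tenantUrn, cinderId);

        System.out.println("extensions=" + extensions + ", tag=" + tag);
    }
}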