use of com.emc.storageos.db.client.model.BlockConsistencyGroup in project coprhd-controller by CoprHD.
the class RPBlockServiceApiImpl method prepareRecommendedVolumes.
/**
* Prepare Recommended Volumes for Protected scenarios only.
*
* This method is responsible for acting the same as the unprotected "prepareRecommendedVolumes" call,
* however it needs to create multiple volumes per single volume request in order to generate protection.
*
* The most typical scenario is that, for any one volume requested in a CRR configuration, we create:
* 1. One Source Volume
* 2. One Source Journal Volume (minimum 10GB, otherwise 2.5X source size)
* 3. One Target Volume on protection varray
* 4. One Target Journal Volume on protection varray
*
* In a CLR configuration, there are additional volumes created for the Local Target and Local Target Journal.
*
* This method will assemble a ProtectionSet object in Cassandra that will describe the Protection that
* will be created on the Protection System.
*
* When other protection mechanisms come on board, the RP-ness of this method will need to be pulled out.
*
* @param param volume create request
* @param task task from request or generated
* @param taskList task list
* @param project project from request
* @param originalVarray varray from request
* @param originalVpool vpool from request
* @param numberOfVolumesInRequest volume count from the request
* @param recommendations list of resulting recommendations from placement
* @param volumeLabel the base volume label from the request
* @param capabilities Capabilities object
* @param descriptors List of descriptors to be populated
* @param volumeURIs List to hold volumes that have been prepared
*/
private void prepareRecommendedVolumes(VolumeCreate param, String task, TaskList taskList, Project project, VirtualArray originalVarray, VirtualPool originalVpool, Integer numberOfVolumesInRequest, List<Recommendation> recommendations, String volumeLabel, VirtualPoolCapabilityValuesWrapper capabilities, List<VolumeDescriptor> descriptors, List<URI> volumeURIs) throws APIException {
boolean isChangeVpool = false;
boolean isChangeVpoolForProtectedVolume = false;
boolean isSrcAndHaSwapped = VirtualPool.isRPVPlexProtectHASide(originalVpool);
boolean metroPointEnabled = VirtualPool.vPoolSpecifiesMetroPoint(originalVpool);
// This copy of the capabilities object is meant to be used by all volume prepares that require changing data,
// which in our case are the TARGET and JOURNAL volumes. SOURCE will always use the main capabilities object.
VirtualPoolCapabilityValuesWrapper copyOfCapabilities = new VirtualPoolCapabilityValuesWrapper(capabilities);
// Set the volume name from the param
String volumeName = volumeLabel;
// Need to check if we should swap src and ha, call the block scheduler code to
// find out. Nothing will be changed for MetroPoint.
VirtualArray haVarray = null;
VirtualPool haVpool = null;
SwapContainer container = this.getBlockScheduler().new SwapContainer();
container.setSrcVarray(originalVarray);
container.setSrcVpool(originalVpool);
container.setHaVarray(haVarray);
container.setHaVpool(haVpool);
container = RecoverPointScheduler.initializeSwapContainer(container, _dbClient);
// Use the new references post swap
VirtualArray varray = container.getSrcVarray();
VirtualPool vpool = container.getSrcVpool();
// Save a reference to the CG, we'll need this later
BlockConsistencyGroup consistencyGroup = capabilities.getBlockConsistencyGroup() == null ? null : _dbClient.queryObject(BlockConsistencyGroup.class, capabilities.getBlockConsistencyGroup());
// Total volumes to be created
int totalVolumeCount = 0;
// Create an entire Protection object for each recommendation result.
Iterator<Recommendation> recommendationsIter = recommendations.iterator();
while (recommendationsIter.hasNext()) {
RPProtectionRecommendation rpProtectionRec = (RPProtectionRecommendation) recommendationsIter.next();
URI protectionSystemURI = rpProtectionRec.getProtectionDevice();
URI changeVpoolVolumeURI = rpProtectionRec.getVpoolChangeVolume();
Volume changeVpoolVolume = (changeVpoolVolumeURI == null ? null : _dbClient.queryObject(Volume.class, changeVpoolVolumeURI));
isChangeVpool = (changeVpoolVolumeURI != null);
isChangeVpoolForProtectedVolume = rpProtectionRec.isVpoolChangeProtectionAlreadyExists();
boolean addJournalForStandbySourceCopy = capabilities.getAddJournalCapacity() && (rpProtectionRec.getStandbyJournalRecommendation() != null);
String newVolumeLabel = volumeName;
// Find the Source RP Copy Name
String sourceCopyName = retrieveRpCopyName(originalVpool, varray, consistencyGroup, true);
String standbySourceCopyName = "";
if (addJournalForStandbySourceCopy) {
// Find the Source Standby RP Copy Name - for add journal operation
standbySourceCopyName = retrieveRpCopyName(originalVpool, varray, consistencyGroup, true);
}
if (metroPointEnabled) {
// Find the Source Standby RP Copy Name - for MetroPoint
haVarray = _dbClient.queryObject(VirtualArray.class, VPlexUtil.getHAVarray(originalVpool));
standbySourceCopyName = retrieveRpCopyName(originalVpool, haVarray, consistencyGroup, true);
}
StringBuffer volumeInfoBuffer = new StringBuffer();
volumeInfoBuffer.append(String.format(NEW_LINE));
// Prepare the Journals first
try {
prepareRpJournals(rpProtectionRec, project, consistencyGroup, vpool, originalVpool, param, numberOfVolumesInRequest, newVolumeLabel, isChangeVpoolForProtectedVolume, copyOfCapabilities, protectionSystemURI, taskList, task, descriptors, volumeURIs, volumeInfoBuffer, sourceCopyName, standbySourceCopyName);
} catch (Exception e) {
_log.error("Error trying to prepare RP Journal volumes", e);
throw APIException.badRequests.rpBlockApiImplPrepareVolumeException(newVolumeLabel);
}
// Prepare the source and targets
if (rpProtectionRec.getSourceRecommendations() != null) {
for (RPRecommendation sourceRec : rpProtectionRec.getSourceRecommendations()) {
// Get a reference to all existing VPLEX Source volumes (if any)
List<Volume> allSourceVolumesInCG = BlockConsistencyGroupUtils.getActiveVplexVolumesInCG(consistencyGroup, _dbClient, Volume.PersonalityTypes.SOURCE);
// Validate the MetroPoint type when this is the first MP volume of a new CG.
if (metroPointEnabled && allSourceVolumesInCG.isEmpty()) {
validateMetroPointType(sourceRec.getMetroPointType());
}
// Get the number of volumes needed to be created for this recommendation.
int volumeCountInRec = sourceRec.getResourceCount();
// All source volumes will share the same secondary journal.
if (isChangeVpoolForProtectedVolume) {
_log.info(String.format("Change Virtual Pool Protected: %d existing source volume(s) in CG [%s](%s) are affected.", allSourceVolumesInCG.size(), consistencyGroup.getLabel(), consistencyGroup.getId()));
// Force the count to the number of existing source volumes in the CG.
volumeCountInRec = allSourceVolumesInCG.size();
}
// Grab a handle on the haRec; it could be null, which is OK.
RPRecommendation haRec = sourceRec.getHaRecommendation();
for (int volumeCount = 0; volumeCount < volumeCountInRec; volumeCount++) {
// Let's not get into multiple of multiples, this class will handle multi volume creates.
// So force the incoming VolumeCreate param to be set to 1 always from here on.
sourceRec.setResourceCount(1);
if (haRec != null) {
haRec.setResourceCount(1);
}
newVolumeLabel = generateDefaultVolumeLabel(volumeName, totalVolumeCount, numberOfVolumesInRequest);
// Grab the existing volume and task object from the incoming task list
Volume preCreatedVolume = StorageScheduler.getPrecreatedVolume(_dbClient, taskList, newVolumeLabel);
// Assemble a Replication Set: a collection of volumes with one production volume and any number of
// targets.
String rsetName = "RSet-" + newVolumeLabel;
// Increment total volume count
totalVolumeCount++;
// This name has to remain unique, especially when the number of volumes requested to be created
// is more than 1.
param.setName(newVolumeLabel);
Volume sourceVolume = null;
// /////// SOURCE ///////////
if (!isChangeVpoolForProtectedVolume) {
if (isChangeVpool) {
_log.info(String.format("Change Vpool, use existing Source Volume [%s].", changeVpoolVolume.getLabel()));
} else {
_log.info("Create RP Source Volume...");
}
// Create the source
sourceVolume = createRecoverPointVolume(sourceRec, newVolumeLabel, project, capabilities, consistencyGroup, param, protectionSystemURI, Volume.PersonalityTypes.SOURCE, rsetName, preCreatedVolume, null, taskList, task, sourceCopyName, descriptors, changeVpoolVolume, isChangeVpool, isSrcAndHaSwapped, true);
} else {
if (metroPointEnabled) {
_log.info("Upgrade to MetroPoint operation...");
// Update all existing source volumes in the CG to reference the newly created stand-by journal.
for (Volume sourceVol : allSourceVolumesInCG) {
_log.info(String.format("Update the source volume [%s](%s) with new standby journal.", sourceVol.getLabel(), sourceVol.getId()));
// All RP+VPLEX Metro volumes in this CG need to have their backing volume
// references updated with the internal site names for exports.
setInternalSitesForSourceBackingVolumes(sourceRec, haRec, sourceVol, true, false, originalVpool.getHaVarrayConnectedToRp(), sourceCopyName, standbySourceCopyName);
// We need to have all the existing RP+VPLEX Metro volumes from the CG
// added to the volumeURI list so we can properly export the standby
// leg to RP for each volume.
volumeURIs.add(sourceVol.getId());
}
} else {
// NOTE: Upgrade to MetroPoint is (currently) the only supported Change Virtual Pool Protected
// operation, so if we have a null standby journal we're in real trouble.
_log.error("Error trying to upgrade to MetroPoint. Standby journal is null.");
throw APIException.badRequests.rpBlockApiImplPrepareVolumeException(newVolumeLabel);
}
// We have only updated existing source volumes (or thrown an exception), so there is nothing more to do past this point.
break;
}
volumeURIs.add(sourceVolume.getId());
// NOTE: This is only needed for MetroPoint and Distributed RP+VPLEX(HA as RP source),
// nothing will happen for regular RP volumes.
//
// Source volumes need to have their backing volumes set with the correct internal
// site name. The reason for this is so we know later on where to export the volumes to.
//
// This is very evident with MetroPoint as we need to export BOTH sides of the VPLEX Distributed
// Volume.
//
// This is less evident with Distributed RP+VPLEX that has "HA as RP source" set.
// In this case we need to set it on the HA volume as that is the side to export (not the source side).
// To do this we need to pass in a hint: we use the (unswapped) original vpool and check its
// getHaVarrayConnectedToRp() value, which tells us which side (varray) to export.
// This value will only be used if isSrcAndHaSwapped == true.
setInternalSitesForSourceBackingVolumes(sourceRec, haRec, sourceVolume, metroPointEnabled, isSrcAndHaSwapped, originalVpool.getHaVarrayConnectedToRp(), sourceCopyName, standbySourceCopyName);
// /////// TARGET(S) ///////////
List<URI> protectionTargets = new ArrayList<URI>();
for (RPRecommendation targetRec : sourceRec.getTargetRecommendations()) {
// Keep track of the targets created
protectionTargets.add(targetRec.getVirtualArray());
// Grab the target's varray
VirtualArray targetVirtualArray = _dbClient.queryObject(VirtualArray.class, targetRec.getVirtualArray());
_log.info(String.format("Create Target (%s)...", targetVirtualArray.getLabel()));
// If this is a change vpool for an already protected volume, an existing target may have been provisioned, so check before attempting to provision this target.
if (isChangeVpoolForProtectedVolume) {
Volume alreadyProvisionedTarget = RPHelper.findAlreadyProvisionedTargetVolume(changeVpoolVolume, targetRec.getVirtualArray(), _dbClient);
if (alreadyProvisionedTarget != null) {
_log.info(String.format("Existing target volume [%s] found for varray [%s].", alreadyProvisionedTarget.getLabel(), targetVirtualArray.getLabel()));
// No need to go further, continue on to the next target varray
continue;
}
}
// Generate target volume name
String targetVolumeName = new StringBuilder(newVolumeLabel).append(VOLUME_TYPE_TARGET + targetVirtualArray.getLabel()).toString();
// Create the target
Volume targetVolume = createRecoverPointVolume(targetRec, targetVolumeName, project, copyOfCapabilities, consistencyGroup, param, protectionSystemURI, Volume.PersonalityTypes.TARGET, rsetName, null, sourceVolume, taskList, task, targetRec.getRpCopyName(), descriptors, null, false, false, false);
volumeInfoBuffer.append(logVolumeInfo(targetVolume));
volumeURIs.add(targetVolume.getId());
}
// /////// METROPOINT LOCAL TARGET(S) ///////////
if (metroPointEnabled && haRec.getTargetRecommendations() != null && !haRec.getTargetRecommendations().isEmpty()) {
// MetroPoint is enabled, so we need to create targets for the second (stand-by) leg.
for (RPRecommendation standbyTargetRec : haRec.getTargetRecommendations()) {
// Grab the MP target's varray
VirtualArray standyTargetVirtualArray = _dbClient.queryObject(VirtualArray.class, standbyTargetRec.getVirtualArray());
_log.info(String.format("Create Standby Target (%s)..", standyTargetVirtualArray.getLabel()));
// Skip any target varray that is already protected by a target created from the source recommendation.
if (protectionTargets.contains(standbyTargetRec.getVirtualArray())) {
continue;
}
// If this is a change vpool for a protected volume, a target volume may already exist for this standby varray.
if (isChangeVpoolForProtectedVolume) {
Volume alreadyProvisionedTarget = RPHelper.findAlreadyProvisionedTargetVolume(changeVpoolVolume, standyTargetVirtualArray.getId(), _dbClient);
if (alreadyProvisionedTarget != null) {
_log.info(String.format("Existing target volume [%s] found for varray [%s].", alreadyProvisionedTarget.getLabel(), standyTargetVirtualArray.getLabel()));
// No need to go further, continue on to the next target varray
continue;
}
}
// Generate standby target label
String standbyTargetVolumeName = new StringBuilder(newVolumeLabel).append(VOLUME_TYPE_TARGET + standyTargetVirtualArray.getLabel()).toString();
// Create the standby target
Volume standbyTargetVolume = createRecoverPointVolume(standbyTargetRec, standbyTargetVolumeName, project, copyOfCapabilities, consistencyGroup, param, protectionSystemURI, Volume.PersonalityTypes.TARGET, rsetName, null, sourceVolume, taskList, task, standbyTargetRec.getRpCopyName(), descriptors, null, false, false, false);
volumeInfoBuffer.append(logVolumeInfo(standbyTargetVolume));
volumeURIs.add(standbyTargetVolume.getId());
}
}
// Hold off on logging the source volume until we're done creating the targets
volumeInfoBuffer.append(logVolumeInfo(sourceVolume));
}
}
volumeInfoBuffer.append(String.format(NEW_LINE));
_log.info(volumeInfoBuffer.toString());
}
}
}
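The Javadoc above states that a source journal is sized at a minimum of 10GB, otherwise 2.5X the source size. As a minimal, self-contained illustration of that sizing rule only (the class and method names below are hypothetical, capacities are assumed to be byte counts, and this is not the CoprHD implementation):

// Hypothetical sketch of the journal-sizing rule described in the Javadoc above:
// a journal is at least 10 GB, otherwise 2.5x the source capacity.
public final class JournalSizingSketch {

    private static final long GB = 1024L * 1024L * 1024L;
    private static final long MIN_JOURNAL_BYTES = 10L * GB;
    private static final double JOURNAL_MULTIPLIER = 2.5;

    /** Returns the journal capacity in bytes for a given source capacity in bytes. */
    public static long journalCapacity(long sourceCapacityBytes) {
        long scaled = (long) Math.ceil(sourceCapacityBytes * JOURNAL_MULTIPLIER);
        return Math.max(MIN_JOURNAL_BYTES, scaled);
    }

    public static void main(String[] args) {
        // A 2 GB source still gets the 10 GB minimum journal.
        System.out.println(journalCapacity(2L * GB));
        // A 100 GB source gets a 250 GB journal (2.5x).
        System.out.println(journalCapacity(100L * GB));
    }
}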
use of com.emc.storageos.db.client.model.BlockConsistencyGroup in project coprhd-controller by CoprHD.
the class RPBlockServiceApiImpl method rpVPlexJournalMigrations.
/**
* Special Journal migration step needed as Journals belong to a CG and need to be
* gathered from the CG. These migrations are always single migrations.
*
* @param journalMigrationsExist Boolean to determine if journal migrations exist
* @param journalVpoolMigrations List of RPVPlexMigrations for Journals
* @param singleMigrations Container to store all single migrations
* @param cgURIs Set of URIs of all the CGs from the request
* @param logMigrations String buffer for logging
*/
private void rpVPlexJournalMigrations(boolean journalMigrationsExist, List<RPVPlexMigration> journalVpoolMigrations, Map<Volume, VirtualPool> singleMigrations, Set<URI> cgURIs, StringBuffer logMigrations) {
if (journalMigrationsExist) {
for (URI cgURI : cgURIs) {
BlockConsistencyGroup cg = _dbClient.queryObject(BlockConsistencyGroup.class, cgURI);
// Get all Journal volumes from the CG.
List<Volume> journalVolumes = RPHelper.getCgVolumes(_dbClient, cg.getId(), Volume.PersonalityTypes.METADATA.name());
for (Volume journalVolume : journalVolumes) {
// Check to see if this Journal volume qualifies for migration
RPVPlexMigration journalMigration = null;
for (RPVPlexMigration migration : journalVpoolMigrations) {
if (journalVolume.getVirtualArray().equals(migration.getVarray())) {
// Need to make sure we're migrating the right Journal, so check to make sure the copy names match
boolean isSourceJournal = migration.getSubType().equals(Volume.PersonalityTypes.SOURCE) ? true : false;
String copyName = RPHelper.getCgCopyName(_dbClient, cg, migration.getVarray(), isSourceJournal);
if (journalVolume.getRpCopyName().equals(copyName)) {
journalMigration = migration;
break;
}
}
}
// If a qualifying migration was found for this Journal volume, record it; otherwise skip the volume.
if (journalMigration != null) {
// Make sure the journal volume is not involved in another task. If it is, an exception will
// be thrown.
BlockServiceUtils.checkForPendingTasks(journalVolume.getTenant().getURI(), Arrays.asList(journalVolume), _dbClient);
VirtualPool migrateToVpool = journalMigration.getMigrateToVpool();
logMigrations.append(String.format("\tRP+VPLEX migrate JOURNAL [%s](%s) to vpool [%s](%s)\n", journalVolume.getLabel(), journalVolume.getId(), migrateToVpool.getLabel(), migrateToVpool.getId()));
singleMigrations.put(journalVolume, migrateToVpool);
} else {
_log.info(String.format("No migration info was found for Journal volume [%s](%s). Skipping...", journalVolume.getLabel(), journalVolume.getId()));
}
}
}
}
}
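The matching loop above pairs each Journal volume in the CG with a candidate migration by first comparing virtual arrays and then comparing the Journal's RP copy name against the copy name resolved from the CG for that migration's sub-type (SOURCE vs. TARGET). A simplified, hypothetical restatement of that predicate, using placeholder types rather than the real RPVPlexMigration and RPHelper classes:

// Simplified, hypothetical restatement of the journal/migration matching rule above.
// The real code resolves the copy name via RPHelper.getCgCopyName(...); here resolveCopyName is a stand-in.
import java.net.URI;
import java.util.List;
import java.util.Objects;
import java.util.function.BiFunction;

class JournalMigrationMatchSketch {

    static class Migration {
        URI varray;              // varray the migration targets
        boolean isSourceSubType; // true when the migration sub-type is SOURCE
    }

    /**
     * Returns the first migration whose varray matches the journal's varray and whose
     * resolved CG copy name matches the journal's RP copy name, or null if none match.
     */
    static Migration match(URI journalVarray, String journalRpCopyName, List<Migration> candidates,
            BiFunction<URI, Boolean, String> resolveCopyName) {
        for (Migration m : candidates) {
            if (Objects.equals(journalVarray, m.varray)
                    && Objects.equals(journalRpCopyName, resolveCopyName.apply(m.varray, m.isSourceSubType))) {
                return m;
            }
        }
        return null;
    }
}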
use of com.emc.storageos.db.client.model.BlockConsistencyGroup in project coprhd-controller by CoprHD.
the class SRDFBlockServiceApiImpl method createVolumesAndDescriptors.
@Override
public List<VolumeDescriptor> createVolumesAndDescriptors(List<VolumeDescriptor> descriptors, String volumeLabel, Long size, Project project, VirtualArray varray, VirtualPool vpool, List<Recommendation> recommendations, TaskList taskList, String task, VirtualPoolCapabilityValuesWrapper capabilities) {
List<VolumeDescriptor> volumeDescriptors = new ArrayList<VolumeDescriptor>();
// If processing SRDFCopyRecommendations, then just return the SRDFTargets.
for (Recommendation recommendation : recommendations) {
if (recommendation instanceof SRDFCopyRecommendation) {
SRDFRecommendation srdfRecommendation = (SRDFRecommendation) recommendation.getRecommendation();
// Get the Target structure
SRDFRecommendation.Target target = srdfRecommendation.getVirtualArrayTargetMap().get(recommendation.getVirtualArray());
if (target.getDescriptors() != null) {
volumeDescriptors.addAll(target.getDescriptors());
}
}
// If we had any SRDFCopyRecommendations, just return their descriptors now.
if (!volumeDescriptors.isEmpty()) {
return volumeDescriptors;
}
}
// Make sure we have a task list so we can track an operation for each volume to be created.
if (taskList == null) {
taskList = new TaskList();
}
Iterator<Recommendation> recommendationsIter;
final BlockConsistencyGroup consistencyGroup = capabilities.getBlockConsistencyGroup() == null ? null : _dbClient.queryObject(BlockConsistencyGroup.class, capabilities.getBlockConsistencyGroup());
// prepare the volumes
List<URI> volumeURIs = prepareRecommendedVolumes(task, taskList, project, varray, vpool, capabilities.getResourceCount(), recommendations, consistencyGroup, volumeLabel, size.toString());
// Execute the volume creations requests for each recommendation.
recommendationsIter = recommendations.iterator();
while (recommendationsIter.hasNext()) {
Recommendation recommendation = recommendationsIter.next();
volumeDescriptors.addAll(createVolumeDescriptors((SRDFRecommendation) recommendation, volumeURIs, capabilities));
// Log volume descriptor information
logVolumeDescriptorPrecreateInfo(volumeDescriptors, task);
}
return volumeDescriptors;
}
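Note that the emptiness check sits inside the recommendation loop, so the method returns as soon as the first SRDFCopyRecommendation has contributed target descriptors; the remaining recommendations are not examined, which presumably assumes the recommendation list is homogeneous in that case. A generic sketch of this short-circuit shape, using placeholder types rather than the CoprHD recommendation classes:

// Generic sketch of the short-circuit pattern used above: collect descriptors from
// "copy" recommendations and return them as soon as any were found; otherwise fall
// through to the full create path. Types here are placeholders, not CoprHD classes.
import java.util.ArrayList;
import java.util.List;

class ShortCircuitSketch {
    interface Rec { }
    static class CopyRec implements Rec { List<String> descriptors = new ArrayList<>(); }

    static List<String> createDescriptors(List<Rec> recs) {
        List<String> out = new ArrayList<>();
        for (Rec r : recs) {
            if (r instanceof CopyRec) {
                out.addAll(((CopyRec) r).descriptors);
            }
            // Return as soon as anything was collected -- later recommendations are skipped.
            if (!out.isEmpty()) {
                return out;
            }
        }
        // ... otherwise prepare volumes and build descriptors per recommendation ...
        return out;
    }
}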
use of com.emc.storageos.db.client.model.BlockConsistencyGroup in project coprhd-controller by CoprHD.
the class BlockConsistencyGroupService method performProtectionAction.
/**
* Since all of the protection operations are very similar, this method does all of the work.
* We keep the actual REST methods separate mostly for the purpose of documentation generators.
*
* @param consistencyGroupId the URI of the BlockConsistencyGroup to perform the protection action against.
* @param copy copy descriptor carrying the target virtual array ID, the access mode, and an optional point in time, specified in UTC.
* Allowed point-in-time values: "yyyy-MM-dd_HH:mm:ss" formatted date or datetime in milliseconds.
* @param op operation to perform (pause, stop, failover, etc)
* @return task resource rep
* @throws InternalException
*/
private TaskResourceRep performProtectionAction(URI consistencyGroupId, Copy copy, String op) throws InternalException {
ArgValidator.checkFieldUriType(consistencyGroupId, BlockConsistencyGroup.class, "id");
ArgValidator.checkFieldUriType(copy.getCopyID(), VirtualArray.class, "copyId");
// Get the BlockConsistencyGroup and target VirtualArray associated with the request.
final BlockConsistencyGroup consistencyGroup = (BlockConsistencyGroup) queryResource(consistencyGroupId);
final VirtualArray targetVirtualArray = _permissionsHelper.getObjectById(copy.getCopyID(), VirtualArray.class);
ArgValidator.checkEntity(consistencyGroup, consistencyGroupId, true);
ArgValidator.checkEntity(targetVirtualArray, copy.getCopyID(), true);
// The consistency group needs to be associated with RecoverPoint in order to perform the operation.
if (!consistencyGroup.checkForType(Types.RP)) {
// Attempt to do protection link management on unprotected CG
throw APIException.badRequests.consistencyGroupMustBeRPProtected(consistencyGroupId);
}
if (op.equalsIgnoreCase(ProtectionOp.SWAP.getRestOp()) && !NullColumnValueGetter.isNullURI(consistencyGroupId)) {
ExportUtils.validateConsistencyGroupBookmarksExported(_dbClient, consistencyGroupId);
}
// Catch any attempts to use an invalid access mode
if (op.equalsIgnoreCase(ProtectionOp.CHANGE_ACCESS_MODE.getRestOp()) && !Copy.ImageAccessMode.DIRECT_ACCESS.name().equalsIgnoreCase(copy.getAccessMode())) {
throw APIException.badRequests.unsupportedAccessMode(copy.getAccessMode());
}
// Verify that the supplied target Virtual Array is being referenced by at least one target volume in the CG.
List<Volume> targetVolumes = getTargetVolumes(consistencyGroup, copy.getCopyID());
if (targetVolumes == null || targetVolumes.isEmpty()) {
// The supplied target varray is not referenced by any target volumes in the CG.
throw APIException.badRequests.targetVirtualArrayDoesNotMatch(consistencyGroupId, copy.getCopyID());
}
// Get the first target volume
Volume targetVolume = targetVolumes.get(0);
String task = UUID.randomUUID().toString();
Operation status = new Operation();
status.setResourceType(ProtectionOp.getResourceOperationTypeEnum(op));
_dbClient.createTaskOpStatus(BlockConsistencyGroup.class, consistencyGroupId, task, status);
ProtectionSystem system = _dbClient.queryObject(ProtectionSystem.class, targetVolume.getProtectionController());
String deviceType = system.getSystemType();
if (!deviceType.equals(DiscoveredDataObject.Type.rp.name())) {
throw APIException.badRequests.protectionForRpClusters();
}
RPController controller = getController(RPController.class, system.getSystemType());
controller.performProtectionOperation(system.getId(), consistencyGroupId, targetVolume.getId(), copy.getPointInTime(), copy.getAccessMode(), op, task);
/*
* auditOp(OperationTypeEnum.PERFORM_PROTECTION_ACTION, true, AuditLogManager.AUDITOP_BEGIN,
* op, copyID.toString(), id.toString(), system.getId().toString());
*/
return toTask(consistencyGroup, task, status);
}
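The Javadoc for performProtectionAction says the point in time may be supplied either as a "yyyy-MM-dd_HH:mm:ss" formatted UTC date or as a datetime in milliseconds. A minimal sketch of accepting both representations; the helper name and null handling are assumptions for illustration, not the actual CoprHD parsing code:

// Hypothetical sketch: parse a point-in-time value that may be either epoch
// milliseconds or a "yyyy-MM-dd_HH:mm:ss" UTC timestamp, as described in the Javadoc.
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;

class PointInTimeSketch {

    static Date parsePointInTime(String pointInTime) throws ParseException {
        if (pointInTime == null || pointInTime.isEmpty()) {
            return null; // no specific point in time requested
        }
        try {
            // First try a raw millisecond value.
            return new Date(Long.parseLong(pointInTime));
        } catch (NumberFormatException e) {
            // Fall back to the documented UTC date format.
            SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd_HH:mm:ss");
            fmt.setTimeZone(TimeZone.getTimeZone("UTC"));
            return fmt.parse(pointInTime);
        }
    }
}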
use of com.emc.storageos.db.client.model.BlockConsistencyGroup in project coprhd-controller by CoprHD.
the class BlockConsistencyGroupService method createConsistencyGroup.
/**
* Create a new consistency group
*
* You can create a consistency group, but adding volumes into it will be done in the
* volume create operations:
*
* 1. Create CG object in Bourne
* 2. Operation will be synchronous
*
* @prereq none
*
* @param param
*
* @brief Create consistency group
* @return Consistency Group created
*/
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
public BlockConsistencyGroupRestRep createConsistencyGroup(final BlockConsistencyGroupCreate param) {
checkForDuplicateName(param.getName(), BlockConsistencyGroup.class);
ArgValidator.checkIsAlphaNumeric(param.getName());
// Validate name
ArgValidator.checkFieldNotEmpty(param.getName(), "name");
// Validate name not greater than 64 characters
ArgValidator.checkFieldLengthMaximum(param.getName(), CG_MAX_LIMIT, "name");
// Validate project
ArgValidator.checkFieldUriType(param.getProject(), Project.class, "project");
final Project project = _dbClient.queryObject(Project.class, param.getProject());
ArgValidator.checkEntity(project, param.getProject(), isIdEmbeddedInURL(param.getProject()));
// Verify the user is authorized.
verifyUserIsAuthorizedForRequest(project);
// Create Consistency Group in db
final BlockConsistencyGroup consistencyGroup = new BlockConsistencyGroup();
consistencyGroup.setId(URIUtil.createId(BlockConsistencyGroup.class));
consistencyGroup.setLabel(param.getName());
consistencyGroup.setProject(new NamedURI(project.getId(), project.getLabel()));
consistencyGroup.setTenant(project.getTenantOrg());
// disable array consistency if user has selected not to create backend replication group
consistencyGroup.setArrayConsistency(param.getArrayConsistency());
_dbClient.createObject(consistencyGroup);
return map(consistencyGroup, null, _dbClient);
}
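createConsistencyGroup validates the requested name before persisting the group: it must be unique, alphanumeric, non-empty, and no longer than 64 characters (CG_MAX_LIMIT). A plain-JDK sketch of those constraints; the exact character set accepted by ArgValidator.checkIsAlphaNumeric may differ, and uniqueness is checked against the database in the real code:

// Hypothetical restatement of the name constraints enforced above, using only the JDK.
// The 64-character limit mirrors CG_MAX_LIMIT; the alphanumeric pattern is an assumption.
class CgNameValidationSketch {

    static final int CG_MAX_LIMIT = 64;

    static void validateName(String name) {
        if (name == null || name.isEmpty()) {
            throw new IllegalArgumentException("name must not be empty");
        }
        if (name.length() > CG_MAX_LIMIT) {
            throw new IllegalArgumentException("name must be at most " + CG_MAX_LIMIT + " characters");
        }
        if (!name.matches("[a-zA-Z0-9]+")) {
            throw new IllegalArgumentException("name must be alphanumeric");
        }
    }
}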