Use of com.emc.storageos.db.client.model.BlockConsistencyGroup in project coprhd-controller (CoprHD):
class RecoverPointScheduler, method cgPoolsHaveAvailableCapacity.
/**
 * Computes whether the storage pools already used by the existing source, target and
 * journal volumes of a RecoverPoint consistency group have sufficient free capacity
 * to satisfy the placement request.
 *
 * @param sourceVolume The existing source volume to use for the capacity checks
 * @param capabilities Capabilities reference (size, resource count, consistency group)
 * @param vpool The vpool being used
 * @param protectionVarrays The protection virtual arrays of the vpool
 * @return true if every required pool has sufficient free capacity, false otherwise
 * @throws InternalServerErrorException if a VPLEX volume has no backend volumes
 * @throws APIException if no existing journal can be found for a copy
 */
private boolean cgPoolsHaveAvailableCapacity(Volume sourceVolume, VirtualPoolCapabilityValuesWrapper capabilities,
        VirtualPool vpool, List<VirtualArray> protectionVarrays) {
    boolean cgPoolsHaveAvailableCapacity = true;
    // Aggregate required capacity per pool so a pool shared by several volumes is
    // checked against the SUM of all its requirements, not each one individually.
    Map<URI, Long> storagePoolRequiredCapacity = new HashMap<URI, Long>();
    Map<URI, StoragePool> storagePoolCache = new HashMap<URI, StoragePool>();
    // Keep a map with some extra info in it so the logs have a better description of
    // why we can't reuse a particular pool.
    Map<URI, String> storagePoolErrorDetail = new HashMap<URI, String>();
    _log.info("Checking if the existing storage pools used have sufficient capacity to satisfy the placement request...");
    if (sourceVolume != null) {
        // TODO: need to update code below to look like the stuff Bharath added for multiple resources
        long sourceVolumesRequiredCapacity = getSizeInKB(capabilities.getSize() * capabilities.getResourceCount());
        accumulateRequiredPoolCapacity(sourceVolume, sourceVolumesRequiredCapacity,
                sourceVolume.getPersonality(),
                storagePoolRequiredCapacity, storagePoolCache, storagePoolErrorDetail);

        // Account for the source journal volume.
        List<Volume> sourceJournals = RPHelper.findExistingJournalsForCopy(dbClient,
                sourceVolume.getConsistencyGroup(), sourceVolume.getRpCopyName());
        // BUGFIX: check for an empty list BEFORE calling get(0). The original called
        // get(0) first, which threw IndexOutOfBoundsException instead of the intended
        // bad-request exception when no journal exists.
        if (sourceJournals.isEmpty() || sourceJournals.get(0) == null) {
            _log.error(String.format("No existing source journal found in CG [%s] for copy [%s], returning false",
                    sourceVolume.getConsistencyGroup(), sourceVolume.getRpCopyName()));
            throw APIException.badRequests.unableToFindSuitableJournalRecommendation();
        }
        Volume sourceJournal = sourceJournals.get(0);
        long sourceJournalSizePerPolicy = RPHelper.getJournalSizeGivenPolicy(
                String.valueOf(capabilities.getSize()), vpool.getJournalSize(), capabilities.getResourceCount());
        long sourceJournalVolumesRequiredCapacity = getSizeInKB(sourceJournalSizePerPolicy);
        accumulateRequiredPoolCapacity(sourceJournal, sourceJournalVolumesRequiredCapacity,
                sourceVolume.getPersonality() + " " + sourceJournal.getPersonality(),
                storagePoolRequiredCapacity, storagePoolCache, storagePoolErrorDetail);

        if (sourceVolume.getRpTargets() != null) {
            for (VirtualArray protectionVarray : protectionVarrays) {
                // Find the protection settings that apply to this virtual array.
                VpoolProtectionVarraySettings settings = RPHelper.getProtectionSettings(vpool, protectionVarray, dbClient);
                // If there was no vpool specified with the protection settings, use the base vpool for this varray.
                VirtualPool protectionVpool = vpool;
                if (settings.getVirtualPool() != null) {
                    protectionVpool = dbClient.queryObject(VirtualPool.class, settings.getVirtualPool());
                }
                // Find the existing source volume target that corresponds to this protection
                // virtual array. We need to see if the storage pool has capacity for another
                // target volume.
                Volume targetVolume = getTargetVolumeForProtectionVirtualArray(sourceVolume, protectionVarray);
                // Target volumes will be the same size as the source.
                long targetVolumeRequiredCapacity = getSizeInKB(capabilities.getSize());
                accumulateRequiredPoolCapacity(targetVolume, targetVolumeRequiredCapacity,
                        targetVolume.getPersonality(),
                        storagePoolRequiredCapacity, storagePoolCache, storagePoolErrorDetail);

                // Account for the target journal volumes.
                List<Volume> targetJournals = RPHelper.findExistingJournalsForCopy(dbClient,
                        targetVolume.getConsistencyGroup(), targetVolume.getRpCopyName());
                // BUGFIX: same empty-list guard as for the source journal above.
                if (targetJournals.isEmpty() || targetJournals.get(0) == null) {
                    _log.error(String.format("No existing target journal found in CG [%s] for copy [%s], returning false",
                            targetVolume.getConsistencyGroup(), targetVolume.getRpCopyName()));
                    throw APIException.badRequests.unableToFindSuitableJournalRecommendation();
                }
                Volume targetJournalVolume = targetJournals.get(0);
                long targetJournalSizePerPolicy = RPHelper.getJournalSizeGivenPolicy(
                        String.valueOf(capabilities.getSize()), protectionVpool.getJournalSize(),
                        capabilities.getResourceCount());
                long targetJournalVolumeRequiredCapacity = getSizeInKB(targetJournalSizePerPolicy);
                accumulateRequiredPoolCapacity(targetJournalVolume, targetJournalVolumeRequiredCapacity,
                        targetVolume.getPersonality() + " " + targetJournalVolume.getPersonality(),
                        storagePoolRequiredCapacity, storagePoolCache, storagePoolErrorDetail);
            }
        }

        // Now verify that every pool we would reuse can absorb its accumulated requirement.
        BlockConsistencyGroup cg = dbClient.queryObject(BlockConsistencyGroup.class,
                capabilities.getBlockConsistencyGroup());
        for (Map.Entry<URI, Long> storagePoolEntry : storagePoolRequiredCapacity.entrySet()) {
            StoragePool storagePool = storagePoolCache.get(storagePoolEntry.getKey());
            long freeCapacity = storagePool.getFreeCapacity();
            long requiredCapacity = storagePoolEntry.getValue().longValue();
            if (requiredCapacity > freeCapacity) {
                cgPoolsHaveAvailableCapacity = false;
                // BUGFIX: the original format string had 5 specifiers but 6 arguments,
                // silently shifting every value after the first into the wrong slot.
                _log.info(String.format("Unable to fully align placement with existing %s volume [%s] from "
                        + "RecoverPoint consistency group [%s]. Required capacity is %s and we can't re-use storage pool [%s] "
                        + "as it only has %s free capacity.",
                        storagePoolErrorDetail.get(storagePool.getId()), sourceVolume.getLabel(), cg.getLabel(),
                        SizeUtil.translateSize(requiredCapacity, SizeUtil.SIZE_GB), storagePool.getLabel(),
                        SizeUtil.translateSize(freeCapacity, SizeUtil.SIZE_GB)));
                break;
            } else {
                _log.info(String.format("Storage pool [%s], used by consistency group [%s], has the required capacity and will be "
                        + "used for this placement request.", storagePool.getLabel(), cg.getLabel()));
            }
        }
    }
    return cgPoolsHaveAvailableCapacity;
}

/**
 * Records the capacity required from the storage pool(s) backing the given volume.
 * For a VPLEX volume the requirement is recorded against the pool of each backend
 * volume; otherwise against the volume's own pool.
 *
 * @param volume the existing volume whose pool(s) must absorb the new capacity
 * @param requiredCapacity required capacity in KB to add for each pool
 * @param errorDetail human-readable description (e.g. personality) used in failure logs
 * @param storagePoolRequiredCapacity accumulator of pool URI -> total required KB
 * @param storagePoolCache cache of pool URI -> StoragePool to avoid re-querying
 * @param storagePoolErrorDetail accumulator of pool URI -> log detail string
 */
private void accumulateRequiredPoolCapacity(Volume volume, long requiredCapacity, String errorDetail,
        Map<URI, Long> storagePoolRequiredCapacity, Map<URI, StoragePool> storagePoolCache,
        Map<URI, String> storagePoolErrorDetail) {
    if (RPHelper.isVPlexVolume(volume, dbClient)) {
        // A VPLEX virtual volume has no pool of its own; its capacity comes from the
        // pools of its backend (associated) volumes.
        if (null == volume.getAssociatedVolumes() || volume.getAssociatedVolumes().isEmpty()) {
            _log.error("VPLEX volume {} has no backend volumes.", volume.forDisplay());
            throw InternalServerErrorException.internalServerErrors.noAssociatedVolumesForVPLEXVolume(volume.forDisplay());
        }
        for (String backingVolumeId : volume.getAssociatedVolumes()) {
            Volume backingVolume = dbClient.queryObject(Volume.class, URI.create(backingVolumeId));
            StoragePool backingVolumePool = dbClient.queryObject(StoragePool.class, backingVolume.getPool());
            storagePoolCache.put(backingVolumePool.getId(), backingVolumePool);
            updateStoragePoolRequiredCapacityMap(storagePoolRequiredCapacity, backingVolumePool.getId(), requiredCapacity);
            storagePoolErrorDetail.put(backingVolumePool.getId(), errorDetail);
        }
    } else {
        StoragePool pool = dbClient.queryObject(StoragePool.class, volume.getPool());
        storagePoolCache.put(pool.getId(), pool);
        updateStoragePoolRequiredCapacityMap(storagePoolRequiredCapacity, pool.getId(), requiredCapacity);
        storagePoolErrorDetail.put(pool.getId(), errorDetail);
    }
}
Use of com.emc.storageos.db.client.model.BlockConsistencyGroup in project coprhd-controller (CoprHD):
class SRDFScheduler, method findRAGroup.
/**
 * Match up RA Groups to the source and target storage systems. If a suitable match is
 * found, return its ID.
 *
 * @param sourceStorageSystem
 *            potential source storage system
 * @param targetStorageSystem
 *            potential target storage system
 * @param copyMode
 *            async, sync mode literals
 * @param project
 *            project requested (used for RA group name matching)
 * @param consistencyGroupUri
 *            optional consistency group the placement is for; may be null
 * @return the URI of a suitable RemoteDirectorGroup, or null if none qualifies
 */
private URI findRAGroup(final StorageSystem sourceStorageSystem, final StorageSystem targetStorageSystem,
        final String copyMode, final Project project, final URI consistencyGroupUri) {
    URIQueryResultList raGroupsInDB = new URIQueryResultList();
    BlockConsistencyGroup cgObj = null;
    if (null != consistencyGroupUri) {
        cgObj = _dbClient.queryObject(BlockConsistencyGroup.class, consistencyGroupUri);
    }
    // Primary name check, "V-<projectname>" or "<projectname>"
    StringSet grpNames = SRDFUtils.getQualifyingRDFGroupNames(project);
    // If placement doesn't require project-based label below, remove this check.
    if (project.getLabel().length() > SRDFUtils.RDF_GROUP_NAME_MAX_LENGTH - SRDFUtils.RDF_GROUP_PREFIX.length()) {
        _log.warn(String.format("SRDF RA Group Placement: Project name is longer than the number of characters allowed by VMAX for an RA group name. This will cause an issue if you have multiple projects that start with %s",
                project.getLabel().substring(0, SRDFUtils.RDF_GROUP_NAME_MAX_LENGTH - SRDFUtils.RDF_GROUP_PREFIX.length())));
    }
    _dbClient.queryByConstraint(
            ContainmentConstraint.Factory.getStorageDeviceRemoteGroupsConstraint(sourceStorageSystem.getId()),
            raGroupsInDB);
    Iterator<URI> raGroupIter = raGroupsInDB.iterator();
    List<RemoteDirectorGroup> raGroups = findRAGroupAssociatedWithCG(raGroupIter, cgObj);
    for (RemoteDirectorGroup raGroup : raGroups) {
        // BUGFIX: check for null/inactive BEFORE dereferencing raGroup. The original
        // called raGroup.getId() and raGroup.getLabel() first, which would NPE on a
        // null entry and make the null check below unreachable.
        if (null == raGroup || raGroup.getInactive()) {
            _log.info("SRDF RA Group Placement: Found that the RA Group is either not in the database or in the deactivated state, not considering.");
            continue;
        }
        URI raGroupId = raGroup.getId();
        _log.info(String.format("SRDF RA Group Placement: Checking to see if RA Group: %s is suitable for SRDF protection, given the request.", raGroup.getLabel()));
        _log.info(String.format("SRDF RA Group Placement: Source Array: %s --> Target Array: %s",
                sourceStorageSystem.getNativeGuid(), targetStorageSystem.getNativeGuid()));
        // Check to see if the RA Group contains (substring is OK) any of the desired labels
        if (raGroup.getLabel() == null || !SRDFUtils.containsRaGroupName(grpNames, raGroup.getLabel())) {
            _log.info(String.format("SRDF RA Group Placement: Found that the RA Group does not have a label or does not contain any of the names (%s), which is currently required for leveraging existing RA Groups.",
                    StringUtils.join(grpNames, ",")));
            continue;
        }
        // Check to see if the source storage system ID matches
        if (!raGroup.getSourceStorageSystemUri().equals(sourceStorageSystem.getId())) {
            _log.info(String.format("SRDF RA Group Placement: Found that the RA Group does not cater to the source storage system we require. We require %s, but this group is defined as %s",
                    sourceStorageSystem.getNativeGuid(), raGroup.getNativeGuid()));
            continue;
        }
        // Check to see if the remote storage system ID matches
        if (!raGroup.getRemoteStorageSystemUri().equals(targetStorageSystem.getId())) {
            _log.info(String.format("SRDF RA Group Placement: Found that the RA Group does not cater to the remote (target) storage system we require. We require %s, but this group is defined as %s",
                    targetStorageSystem.getNativeGuid(), raGroup.getNativeGuid()));
            continue;
        }
        // Check to see if the connectivity status is UP
        if (!raGroup.getConnectivityStatus().equals(RemoteDirectorGroup.ConnectivityStatus.UP.toString())) {
            _log.info(String.format("SRDF RA Group Placement: Found that the RA Group is not in the proper connectivity state of UP, instead it is in the state: %s",
                    raGroup.getConnectivityStatus().toString()));
            continue;
        }
        // A missing copy mode is tolerated (warn only); the copy-mode compatibility
        // decision comes next.
        if (raGroup.getSupportedCopyMode() == null) {
            _log.warn(String.format("SRDF RA Group Placement: Copy Mode not set on RA Group %s, probably an unsupported SRDF Deployment: ", raGroup.getLabel()));
        }
        // The group must support ALL copy modes or the specific mode requested
        // for that copy.
        if (raGroup.getSupportedCopyMode() != null
                && !raGroup.getSupportedCopyMode().equals(RemoteDirectorGroup.SupportedCopyModes.ALL.toString())
                && !raGroup.getSupportedCopyMode().equals(copyMode)) {
            // BUGFIX: the original message read "does is using the proper copy policy",
            // stating the opposite of the actual failure.
            _log.info(String.format("SRDF RA Group Placement: Found that the RA Group is not using the required copy policy of %s, instead it is using copy policy: %s",
                    copyMode, raGroup.getSupportedCopyMode().toString()));
            continue;
        }
        // Look for empty RA Groups alone, which can be used to create this new CG.
        if (raGroups.size() > 1 && null != cgObj && raGroup.getVolumes() != null && !raGroup.getVolumes().isEmpty()
                && !cgObj.getLabel().equalsIgnoreCase(raGroup.getLabel())) {
            _log.info(String.format("Found that the RDF Group %s has existing volumes with a CG different from expected: %s .",
                    raGroup.getLabel(), cgObj.getLabel()));
            continue;
        }
        _log.info(String.format("SRDF RA Group Placement: RA Group: %s on %s --> %s is selected for SRDF protection",
                raGroup.getLabel(), sourceStorageSystem.getNativeGuid(), targetStorageSystem.getNativeGuid()));
        return raGroupId;
    }
    _log.warn("SRDF RA Group Placement: No RA Group was suitable for SRDF protection. See previous log messages for specific failed criteria on each RA Group considered.");
    return null;
}
Use of com.emc.storageos.db.client.model.BlockConsistencyGroup in project coprhd-controller (CoprHD):
class VmaxSnapshotOperations, method restoreSnapshotSession.
/**
 * {@inheritDoc}
 */
@SuppressWarnings("rawtypes")
@Override
public void restoreSnapshotSession(StorageSystem system, URI snapSessionURI, TaskCompleter completer) throws DeviceControllerException {
// Snapshot-session restore is only implemented for VMAX3; older arrays fall through
// to the unsupported-operation exception below.
if (system.checkIfVmax3()) {
// Only supported for VMAX3 storage systems.
try {
_log.info("Restore snapshot session {} START", snapSessionURI);
BlockSnapshotSession snapSession = _dbClient.queryObject(BlockSnapshotSession.class, snapSessionURI);
// The SMI-S synchronization-aspect instance path that identifies this session on the array.
String syncAspectPath = snapSession.getSessionInstance();
CIMObjectPath settingsStatePath = null;
BlockObject sourceObj = null;
if (snapSession.hasConsistencyGroup() && NullColumnValueGetter.isNotNullValue(snapSession.getReplicationGroupInstance())) {
_log.info("Restoring group snapshot session");
// We need a single source volume for the session.
BlockConsistencyGroup cg = _dbClient.queryObject(BlockConsistencyGroup.class, snapSession.getConsistencyGroup());
List<Volume> nativeVolumes = BlockConsistencyGroupUtils.getActiveNativeVolumesInCG(cg, _dbClient);
// get source group name from the session.
String sourceGroupName = snapSession.getReplicationGroupInstance();
// Group sessions are addressed through the group-synchronized settings path
// (group name + aspect) rather than a per-volume sync settings path.
settingsStatePath = _cimPath.getGroupSynchronizedSettingsPath(system, sourceGroupName, syncAspectPath);
for (Volume volume : nativeVolumes) {
if (sourceGroupName.equals(volume.getReplicationGroupInstance())) {
sourceObj = volume;
// get source volume which matches session's RG name
break;
}
}
// NOTE(review): if no volume in the CG matches the session's replication group,
// sourceObj stays null and the terminateAnyRestoreSessions call below will NPE;
// that NPE is caught by the generic catch and reported via completer.error,
// but an explicit check with a clearer error would be better — confirm whether
// this state is reachable in practice.
} else {
_log.info("Restoring single volume snapshot session");
// Single-volume session: the source is the session's parent block object.
sourceObj = BlockObject.fetch(_dbClient, snapSession.getParent().getURI());
CIMObjectPath sourcePath = _cimPath.getVolumePath(system, sourceObj.getNativeId());
settingsStatePath = _cimPath.getSyncSettingsPath(system, sourcePath, syncAspectPath);
}
// Terminate restore sessions.
// Any lingering restore session on the source must be terminated before a new
// restore can be started.
terminateAnyRestoreSessions(system, null, sourceObj.getId(), completer);
// Invoke SMI-S method to restore snapshot session.
CIMObjectPath replicationSvcPath = _cimPath.getControllerReplicationSvcPath(system);
CIMArgument[] inArgs = null;
CIMArgument[] outArgs = new CIMArgument[5];
inArgs = _helper.getRestoreFromSettingsStateInputArguments(settingsStatePath, true);
_helper.invokeMethod(system, replicationSvcPath, SmisConstants.MODIFY_SETTINGS_DEFINE_STATE, inArgs, outArgs);
// The SMI-S call is asynchronous: extract the job path from the output args and
// enqueue a job that will drive the completer when the array-side job finishes.
CIMObjectPath jobPath = _cimPath.getCimObjectPathFromOutputArgs(outArgs, SmisConstants.JOB);
ControllerServiceImpl.enqueueJob(new QueueJob(new SmisBlockSnapshotSessionRestoreJob(jobPath, system.getId(), completer)));
} catch (Exception e) {
// Any failure (DB, CIM, NPE) is reported through the task completer rather than
// propagated, so the task is marked errored instead of left hanging.
_log.error("Exception restoring snapshot session", e);
ServiceError error = DeviceControllerErrors.smis.unableToCallStorageProvider(e.getMessage());
completer.error(_dbClient, error);
}
} else {
throw DeviceControllerException.exceptions.blockDeviceOperationNotSupported();
}
}
Use of com.emc.storageos.db.client.model.BlockConsistencyGroup in project coprhd-controller (CoprHD):
class ConsistencyGroupUtils, method getSnapshotsConsistencyGroup.
/**
 * Gets the {@link BlockConsistencyGroup} associated with the first snapshot in the
 * given list of snapshots.
 *
 * @param snapshots the snapshots to inspect; only the first entry is examined
 * @param dbClient database client used to load the consistency group
 * @return the consistency group of the first snapshot, or null if the list is empty,
 *         the first snapshot is null, it has no consistency group, or no source
 *         consistency group name can be resolved
 */
public static BlockConsistencyGroup getSnapshotsConsistencyGroup(List<BlockSnapshot> snapshots, DbClient dbClient) {
    // Guard clauses: bail out early on anything that prevents resolving a CG.
    if (snapshots.isEmpty()) {
        return null;
    }
    BlockSnapshot firstSnapshot = snapshots.get(0);
    if (firstSnapshot == null) {
        return null;
    }
    if (isNullURI(firstSnapshot.getConsistencyGroup())) {
        return null;
    }
    if (getSourceConsistencyGroupName(firstSnapshot, dbClient) == null) {
        return null;
    }
    return dbClient.queryObject(BlockConsistencyGroup.class, firstSnapshot.getConsistencyGroup());
}
Use of com.emc.storageos.db.client.model.BlockConsistencyGroup in project coprhd-controller (CoprHD):
class ConsistencyGroupUtils, method getCloneConsistencyGroup.
/**
 * Gets the {@link BlockConsistencyGroup} associated with the given clone.
 *
 * @param cloneURI URI of the clone (full copy) volume
 * @param dbClient database client used to load the clone, its source and the CG
 * @return the consistency group of the clone's source volume, or null when the clone
 *         does not exist, lives on an IBM XIV array, has no associated source volume,
 *         the source is a snapshot, the source has no CG, or the CG was not created
 *         on the backend array
 */
public static BlockConsistencyGroup getCloneConsistencyGroup(URI cloneURI, DbClient dbClient) {
    Volume clone = dbClient.queryObject(Volume.class, cloneURI);
    if (clone == null) {
        return null;
    }
    URI systemURI = clone.getStorageController();
    StorageSystem storage = dbClient.queryObject(StorageSystem.class, systemURI);
    // IBM XIV clones are never treated as CG members here.
    if (storage.deviceIsType(DiscoveredDataObject.Type.ibmxiv)) {
        return null;
    }
    URI source = clone.getAssociatedSourceVolume();
    // BUGFIX: guard against a clone with no associated source volume; the original
    // fetched with a null URI and would NPE further down instead of returning null.
    if (isNullURI(source)) {
        return null;
    }
    BlockObject sourceObj = BlockObject.fetch(dbClient, source);
    // A snapshot source (or a missing source) means there is no volume CG to report.
    if (sourceObj == null || sourceObj instanceof BlockSnapshot) {
        return null;
    }
    Volume sourceVolume = (Volume) sourceObj;
    // Note: the original double-checked the CG URI for null; one check suffices.
    if (isNullURI(sourceVolume.getConsistencyGroup())) {
        return null;
    }
    // Only report the CG if it was actually created on the backend array; check this
    // BEFORE querying the CG object to avoid a needless DB read on the discard path.
    if (!ControllerUtils.checkCGCreatedOnBackEndArray(sourceVolume)) {
        return null;
    }
    return dbClient.queryObject(BlockConsistencyGroup.class, sourceVolume.getConsistencyGroup());
}
Aggregations