Usage example of com.emc.storageos.model.block.BlockObjectRestRep in the coprhd-controller project by CoprHD:
class CreateBlockSnapshotService, method execute().
@Override
public void execute() {
    Tasks<? extends DataObjectRestRep> tasks = null;
    if (ConsistencyUtils.isVolumeStorageType(storageType)) {
        // Volume-level request: create one snapshot (or snapshot session) per selected volume.
        for (BlockObjectRestRep volume : volumes) {
            checkAndPurgeObsoleteSnapshots(volume.getId().toString());
            boolean sessionRequested = BlockProvider.SNAPSHOT_SESSION_TYPE_VALUE.equals(type);
            if (sessionRequested) {
                tasks = execute(new CreateBlockSnapshotSession(volume.getId(), nameParam,
                        linkedSnapshotName, linkedSnapshotCount, linkedSnapshotCopyMode));
            } else {
                tasks = execute(new CreateBlockSnapshot(volume.getId(), type, nameParam, readOnly));
            }
            addAffectedResources(tasks);
            addRetainedReplicas(volume.getId(), tasks.getTasks());
        }
    } else {
        // Consistency-group request: create one snapshot (or session) per selected CG.
        for (String consistencyGroupId : volumeIds) {
            checkAndPurgeObsoleteSnapshots(consistencyGroupId);
            boolean cgSessionRequested = BlockProvider.CG_SNAPSHOT_SESSION_TYPE_VALUE.equals(type);
            if (cgSessionRequested) {
                tasks = ConsistencyUtils.createSnapshotSession(uri(consistencyGroupId), nameParam,
                        linkedSnapshotName, linkedSnapshotCount, linkedSnapshotCopyMode);
            } else {
                tasks = ConsistencyUtils.createSnapshot(uri(consistencyGroupId), nameParam, readOnly);
            }
            addAffectedResources(tasks);
            addRetainedReplicas(uri(consistencyGroupId), tasks.getTasks());
        }
    }
}
Usage example of com.emc.storageos.model.block.BlockObjectRestRep in the coprhd-controller project by CoprHD:
class CreateBlockSnapshotService, method checkAndPurgeObsoleteSnapshots().
/**
 * Check the retention policy and deactivate obsolete snapshots (or snapshot
 * sessions) if the retained count has been exceeded.
 *
 * @param volumeOrCgId - volume id or consistency group id
 */
private void checkAndPurgeObsoleteSnapshots(String volumeOrCgId) {
    if (!isRetentionRequired()) {
        return;
    }
    // Both of these are invariant across the loop, so resolve them once:
    // whether the service targets individual volumes (vs. consistency groups),
    // and whether the requested copy kind is a snapshot session.
    boolean isVolumeStorage = ConsistencyUtils.isVolumeStorageType(storageType);
    String sessionTypeValue = isVolumeStorage
            ? BlockProvider.SNAPSHOT_SESSION_TYPE_VALUE
            : BlockProvider.CG_SNAPSHOT_SESSION_TYPE_VALUE;
    boolean isSession = sessionTypeValue.equals(type);
    List<RetainedReplica> replicas = findObsoleteReplica(volumeOrCgId);
    for (RetainedReplica replica : replicas) {
        if (replica.getAssociatedReplicaIds() == null || replica.getAssociatedReplicaIds().isEmpty()) {
            continue;
        }
        for (String obsoleteSnapshotId : replica.getAssociatedReplicaIds()) {
            info("Deactivating snapshot %s since it exceeds max number of snapshots allowed", obsoleteSnapshotId);
            URI obsoleteSnapshotUri = uri(obsoleteSnapshotId);
            if (isSession) {
                BlockSnapshotSessionRestRep obsoleteSession =
                        getClient().blockSnapshotSessions().get(obsoleteSnapshotUri);
                info("Deactivating snapshot session %s", obsoleteSession.getName());
                if (isVolumeStorage) {
                    execute(new DeactivateBlockSnapshotSession(obsoleteSnapshotUri));
                } else {
                    ConsistencyUtils.removeSnapshotSession(uri(volumeOrCgId), obsoleteSnapshotUri);
                }
            } else {
                BlockObjectRestRep obsoleteSnapshot = BlockStorageUtils.getVolume(obsoleteSnapshotUri);
                info("Deactivating snapshot %s", obsoleteSnapshot.getName());
                if (isVolumeStorage) {
                    execute(new DeactivateBlockSnapshot(obsoleteSnapshotUri, VolumeDeleteTypeEnum.FULL));
                } else {
                    ConsistencyUtils.removeSnapshot(uri(volumeOrCgId), obsoleteSnapshotUri);
                }
            }
        }
        // Remove the bookkeeping record now that its snapshots are gone.
        getModelClient().delete(replica);
    }
}
Usage example of com.emc.storageos.model.block.BlockObjectRestRep in the coprhd-controller project by CoprHD:
class CreateBlockVolumeForHostHelper, method exportVolumes().
/**
 * Exports the given volumes to the host or cluster, processing them in batches of
 * {@code EXPORT_CHUNK_SIZE}. An existing export group is reused when one is found;
 * otherwise a new export is created.
 *
 * @param volumeIds the ids of the volumes to export
 * @return the exported block resources, re-fetched after export (volumes may not
 *         have WWNs until after export on VPLEX)
 */
public List<BlockObjectRestRep> exportVolumes(List<URI> volumeIds) {
    List<URI> batchVolumeIds = Lists.newArrayList();
    int batchCount = 0;
    Iterator<URI> ids = volumeIds.iterator();
    while (ids.hasNext()) {
        batchCount++;
        URI id = ids.next();
        batchVolumeIds.add(id);
        // Flush a full batch, or whatever remains on the final iteration.
        if (batchCount == EXPORT_CHUNK_SIZE || !ids.hasNext()) {
            // See if an existing export exists for the host ports
            ExportGroupRestRep export = null;
            if (cluster != null) {
                export = BlockStorageUtils.findExportByCluster(cluster, project, virtualArray, null);
            } else {
                export = BlockStorageUtils.findExportByHost(host, project, virtualArray, null);
            }
            // If did not find export group for the host/cluster, try find existing empty export with
            // host/cluster name
            boolean createExport = export == null;
            boolean isEmptyExport = export != null && BlockStorageUtils.isEmptyExport(export);
            String exportName = cluster != null ? cluster.getLabel() : host.getHostName();
            if (export == null) {
                export = BlockStorageUtils.findExportsByName(exportName, project, virtualArray);
                isEmptyExport = export != null && BlockStorageUtils.isEmptyExport(export);
                createExport = export == null || !isEmptyExport;
                // If there is an existing non-empty export with the same name, append a time stamp to
                // the name to make it unique. HH (0-23) is used rather than hh (1-12) so that names
                // generated in the morning and afternoon of the same day cannot collide.
                if (export != null && !isEmptyExport) {
                    exportName = exportName + BlockStorageUtils.UNDERSCORE
                            + new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());
                }
            }
            // If the export does not exist or there is a non-empty export with the same name, create a new one
            if (createExport) {
                URI exportId = null;
                if (cluster != null) {
                    exportId = BlockStorageUtils.createClusterExport(project, virtualArray, batchVolumeIds, hlu,
                            cluster, new HashMap<URI, Integer>(), minPaths, maxPaths, pathsPerInitiator, portGroup);
                } else {
                    exportId = BlockStorageUtils.createHostExport(project, virtualArray, batchVolumeIds, hlu,
                            host, new HashMap<URI, Integer>(), minPaths, maxPaths, pathsPerInitiator, portGroup);
                }
                logInfo("create.block.volume.create.export", exportId);
            } else // Add the volume to the existing export
            {
                BlockStorageUtils.addVolumesToExport(batchVolumeIds, hlu, export.getId(),
                        new HashMap<URI, Integer>(), minPaths, maxPaths, pathsPerInitiator, portGroup);
                // If the reused export was empty, it has no compute resource yet; add the
                // host or cluster to it.
                if (isEmptyExport) {
                    if (cluster != null) {
                        BlockStorageUtils.addClusterToExport(export.getId(), cluster.getId(),
                                minPaths, maxPaths, pathsPerInitiator, portGroup);
                    } else {
                        BlockStorageUtils.addHostToExport(export.getId(), host.getId(),
                                minPaths, maxPaths, pathsPerInitiator, portGroup);
                    }
                }
                logInfo("create.block.volume.update.export", export.getId());
            }
            batchVolumeIds.clear();
            batchCount = 0;
        }
    }
    if (host != null) {
        ExecutionUtils.addAffectedResource(host.getId().toString());
    } else if (cluster != null) {
        ExecutionUtils.addAffectedResource(cluster.getId().toString());
    }
    // The volume is created and exported, clear the rollback steps so it will still be available if any other
    // further steps fail
    ExecutionUtils.clearRollback();
    // Get the volumes after exporting, volumes would not have WWNs until after export in VPLEX
    List<BlockObjectRestRep> volumes = BlockStorageUtils.getBlockResources(volumeIds);
    return volumes;
}
Usage example of com.emc.storageos.model.block.BlockObjectRestRep in the coprhd-controller project by CoprHD:
class CreateFullCopyService, method checkAndPurgeObsoleteCopies().
/**
 * Check the retention policy and delete obsolete full copies if the retained
 * count has been exceeded.
 *
 * @param volumeOrCgId - volume id or CG id
 */
private void checkAndPurgeObsoleteCopies(String volumeOrCgId) {
    if (!isRetentionRequired()) {
        return;
    }
    List<RetainedReplica> replicas = findObsoleteReplica(volumeOrCgId);
    for (RetainedReplica replica : replicas) {
        // Guard against a null id list to avoid an NPE, matching the null check
        // performed by CreateBlockSnapshotService.checkAndPurgeObsoleteSnapshots.
        if (replica.getAssociatedReplicaIds() != null) {
            for (String obsoleteCopyId : replica.getAssociatedReplicaIds()) {
                BlockObjectRestRep obsoleteCopy = BlockStorageUtils.getVolume(uri(obsoleteCopyId));
                info("Delete full copy %s (%s) since it exceeds max number of copies allowed", obsoleteCopyId, obsoleteCopy.getName());
                if (ConsistencyUtils.isVolumeStorageType(storageType)) {
                    BlockStorageUtils.removeFullCopy(uri(obsoleteCopyId), VolumeDeleteTypeEnum.FULL);
                } else {
                    ConsistencyUtils.removeFullCopy(uri(volumeOrCgId), uri(obsoleteCopyId));
                }
            }
        }
        // Remove the bookkeeping record now that its copies are gone.
        getModelClient().delete(replica);
    }
}
Usage example of com.emc.storageos.model.block.BlockObjectRestRep in the coprhd-controller project by CoprHD:
class ExportBlockVolumeHelper, method exportBlockResources().
/**
 * export the block resources identified by URIs in the given resource id list
 *
 * @param resourceIds the list of URIs which identify the block resources that need to be exported
 * @param parentId the parent URI for the list of resourceIds
 * @return The list of export groups which have been created/updated
 */
public List<ExportGroupRestRep> exportBlockResources(List<URI> resourceIds, URI parentId) {
    // the list of exports to return
    List<ExportGroupRestRep> exports = Lists.newArrayList();
    // volumes that belong to no existing export and need a brand-new export group
    List<URI> newVolumes = new ArrayList<URI>();
    // export group id -> volumes to add to that existing export
    Map<URI, Set<URI>> addVolumeExports = Maps.newHashMap();
    // export group id -> host/cluster to attach to that (empty) export
    Map<URI, URI> addComputeResourceToExports = Maps.newHashMap();
    // we will need to keep track of the current HLU number
    Integer currentHlu = hlu;
    // get a list of all block resources using the id list provided
    List<BlockObjectRestRep> blockResources = BlockStorageUtils.getBlockResources(resourceIds, parentId);
    URI virtualArrayId = null;
    String exportName = cluster != null ? cluster.getLabel() : host.getHostName();
    // Flag to indicate an empty ExportGroup object in ViPR, i.e., ExportGroup without any volumes and initiators in it.
    boolean isEmptyExport = true;
    ExportGroupRestRep export = null;
    for (BlockObjectRestRep blockResource : blockResources) {
        virtualArrayId = getVirtualArrayId(blockResource);
        // see if we can find an export that uses this block resource
        export = findExistingExportGroup(blockResource, virtualArrayId);
        boolean createExport = export == null;
        isEmptyExport = export != null && BlockStorageUtils.isEmptyExport(export);
        // If no export was found for the resource, fall back to an existing export with the
        // host/cluster name
        if (export == null) {
            export = BlockStorageUtils.findExportsByName(exportName, projectId, virtualArrayId);
            isEmptyExport = export != null && BlockStorageUtils.isEmptyExport(export);
            createExport = export == null || !isEmptyExport;
        }
        if (createExport) {
            newVolumes.add(blockResource.getId());
        } else // Export exists, check if volume belongs to it
        {
            if (BlockStorageUtils.isVolumeInExportGroup(export, blockResource.getId())) {
                logInfo("export.block.volume.contains.volume", export.getId(), blockResource.getId());
            } else {
                updateExportVolumes(export, blockResource, addVolumeExports);
            }
            // An empty export has no compute resource yet; remember to add the host/cluster to it.
            if (isEmptyExport) {
                URI computeResource = cluster != null ? cluster.getId() : host.getId();
                addComputeResourceToExports.put(export.getId(), computeResource);
            }
            exports.add(export);
        }
    }
    // If there is an existing non-empty export with the same name, append a time stamp to the name to
    // make it unique. HH (0-23) is used rather than hh (1-12) so names generated in the morning and
    // afternoon of the same day cannot collide.
    // NOTE(review): exportName does not appear to be read after this point in the visible code;
    // confirm whether the renamed value is consumed elsewhere or this block is dead code.
    if (export != null && !isEmptyExport) {
        exportName = exportName + BlockStorageUtils.UNDERSCORE
                + new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());
    }
    // Bulk update multiple volumes to single export
    List<URI> volumeIds = Lists.newArrayList();
    for (Map.Entry<URI, Set<URI>> entry : addVolumeExports.entrySet()) {
        volumeIds.addAll(entry.getValue());
    }
    Map<URI, Integer> volumeHlus = getVolumeHLUs(volumeIds);
    for (Map.Entry<URI, Set<URI>> entry : addVolumeExports.entrySet()) {
        BlockStorageUtils.addVolumesToExport(entry.getValue(), currentHlu, entry.getKey(), volumeHlus,
                minPaths, maxPaths, pathsPerInitiator, portGroup);
        logInfo("export.block.volume.add.existing", entry.getValue(), entry.getKey());
        // Advance the HLU counter past the volumes just added (a negative/null HLU means auto-assign).
        if ((currentHlu != null) && (currentHlu > -1)) {
            currentHlu += entry.getValue().size();
        }
    }
    for (Map.Entry<URI, URI> entry : addComputeResourceToExports.entrySet()) {
        if (cluster != null) {
            BlockStorageUtils.addClusterToExport(entry.getKey(), cluster.getId(),
                    minPaths, maxPaths, pathsPerInitiator, portGroup);
            logInfo("export.cluster.add.existing", entry.getValue(), entry.getKey());
        } else {
            BlockStorageUtils.addHostToExport(entry.getKey(), host.getId(),
                    minPaths, maxPaths, pathsPerInitiator, portGroup);
            logInfo("export.host.add.existing", entry.getValue(), entry.getKey());
        }
    }
    // Create new export with multiple volumes that don't belong to an export
    if (!newVolumes.isEmpty()) {
        volumeHlus = getVolumeHLUs(newVolumes);
        URI exportId = null;
        if (cluster != null) {
            exportId = BlockStorageUtils.createClusterExport(projectId, virtualArrayId, newVolumes, currentHlu,
                    cluster, volumeHlus, minPaths, maxPaths, pathsPerInitiator, portGroup);
        } else {
            exportId = BlockStorageUtils.createHostExport(projectId, virtualArrayId, newVolumes, currentHlu,
                    host, volumeHlus, minPaths, maxPaths, pathsPerInitiator, portGroup);
        }
        ExportGroupRestRep exportGroup = BlockStorageUtils.getExport(exportId);
        // add this export to the list of exports we will return to the caller
        exports.add(exportGroup);
    }
    // add host or cluster to the affected resources
    if (host != null) {
        ExecutionUtils.addAffectedResource(host.getId().toString());
    } else if (cluster != null) {
        ExecutionUtils.addAffectedResource(cluster.getId().toString());
    }
    // Clear the rollback at this point so later errors won't undo the exports
    ExecutionUtils.clearRollback();
    return exports;
}
Aggregations