Use of com.emc.storageos.db.client.model.OpStatusMap in project coprhd-controller by CoprHD.
Class AbstractBlockServiceApiImpl, method prepareSnapshots.
/**
 * Prepares the snapshots for a snapshot request.
 *
 * @param volumes The volumes for which snapshots are to be created.
 * @param snapshotType The snapshot technology type.
 * @param snapshotName The snapshot name.
 * @param snapshotURIs [OUT] The URIs for the prepared snapshots.
 * @param taskId The unique task identifier.
 *
 * @return The list of snapshots.
 */
@Override
public List<BlockSnapshot> prepareSnapshots(List<Volume> volumes, String snapshotType,
        String snapshotName, List<URI> snapshotURIs, String taskId) {
    List<BlockSnapshot> snapshots = new ArrayList<BlockSnapshot>();
    int count = 1;
    for (Volume volume : volumes) {
        // Attempt to create distinct labels here when creating more than one volume (ScaleIO requirement).
        String rgName = volume.getReplicationGroupInstance();
        VolumeGroup application = volume.getApplication(_dbClient);
        if (volume.isVPlexVolume(_dbClient)) {
            Volume backendVol = VPlexUtil.getVPLEXBackendVolume(volumes.get(0), true, _dbClient);
            if (backendVol != null && !backendVol.getInactive()) {
                rgName = backendVol.getReplicationGroupInstance();
            }
        }
        String label = snapshotName;
        if (NullColumnValueGetter.isNotNullValue(rgName) && application != null) {
            // There can be multiple RGs in a CG; in such cases, generate a unique name.
            if (volumes.size() > 1) {
                label = String.format("%s-%s-%s", snapshotName, rgName, count++);
            } else {
                label = String.format("%s-%s", snapshotName, rgName);
            }
        } else if (volumes.size() > 1) {
            label = String.format("%s-%s", snapshotName, count++);
        }
        BlockSnapshot snapshot = prepareSnapshotFromVolume(volume, snapshotName, label);
        snapshot.setTechnologyType(snapshotType);
        snapshot.setOpStatus(new OpStatusMap());
        Operation op = new Operation();
        op.setResourceType(ResourceOperationTypeEnum.CREATE_VOLUME_SNAPSHOT);
        snapshot.getOpStatus().createTaskStatus(taskId, op);
        snapshotURIs.add(snapshot.getId());
        snapshots.add(snapshot);
    }
    _dbClient.createObject(snapshots);
    return snapshots;
}
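For context, here is a minimal sketch of how a caller might drive prepareSnapshots and then read back the pending Operation recorded in each snapshot's OpStatusMap. The blockServiceApi handle, the volumes list, and the "NATIVE" type string are assumptions made for illustration; the OpStatusMap and Operation calls mirror the ones in the snippet above.

// Hypothetical caller sketch -- blockServiceApi, volumes, and the "NATIVE" type string are illustrative only.
String taskId = UUID.randomUUID().toString();
List<URI> snapshotURIs = new ArrayList<URI>();
List<BlockSnapshot> snapshots = blockServiceApi.prepareSnapshots(
        volumes, "NATIVE", "nightly-snap", snapshotURIs, taskId);
for (BlockSnapshot snapshot : snapshots) {
    // Each prepared snapshot now carries a pending Operation keyed by the task id.
    Operation op = snapshot.getOpStatus().get(taskId);
    // op.getStatus() reflects the task state that the controller later updates.
}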
Use of com.emc.storageos.db.client.model.OpStatusMap in project coprhd-controller by CoprHD.
Class FileQuotaDirectoryService, method deactivateQuotaDirectory.
/**
 * Deactivate the quota directory of a file system; this moves the
 * quota directory to a "marked-for-delete" state.
 * <p>
 * NOTE: This is an asynchronous operation.
 *
 * @param id the URN of the QuotaDirectory
 * @param param QuotaDirectory delete param for optional force delete
 * @brief Delete file system quota directory
 * @return Task resource representation
 * @throws com.emc.storageos.svcs.errorhandling.resources.InternalException
 */
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/{id}/deactivate")
@CheckPermission(roles = { Role.TENANT_ADMIN }, acls = { ACL.OWN, ACL.ALL })
public TaskResourceRep deactivateQuotaDirectory(@PathParam("id") URI id, QuotaDirectoryDeleteParam param)
        throws InternalException {
    _log.info("FileService::deactivateQtree Request received {}", id);
    String task = UUID.randomUUID().toString();
    ArgValidator.checkFieldUriType(id, QuotaDirectory.class, "id");
    QuotaDirectory quotaDirectory = queryResource(id);
    FileShare fs = queryFileShareResource(quotaDirectory.getParent().getURI());
    ArgValidator.checkFieldNotNull(fs, "filesystem");
    // Reject the request if it carries the force-delete flag.
    if (param.getForceDelete()) {
        _log.error("Quota directory delete operation is not supported with force delete {}", param.getForceDelete());
        throw APIException.badRequests.quotaDirectoryDeleteNotSupported(param.getForceDelete());
    } else {
        // Fail the delete if the quota directory has dependent objects such as exports or shares.
        if (quotaDirectoryHasExportsOrShares(fs, quotaDirectory.getName())) {
            throw APIException.badRequests.resourceCannotBeDeleted(
                    "Quota directory " + quotaDirectory.getName() + " has exports/shares");
        }
    }
    Operation op = new Operation();
    op.setResourceType(ResourceOperationTypeEnum.DELETE_FILE_SYSTEM_QUOTA_DIR);
    quotaDirectory.getOpStatus().createTaskStatus(task, op);
    fs.setOpStatus(new OpStatusMap());
    fs.getOpStatus().createTaskStatus(task, op);
    _dbClient.persistObject(fs);
    _dbClient.persistObject(quotaDirectory);
    // Now get ready to make calls into the controller.
    StorageSystem device = _dbClient.queryObject(StorageSystem.class, fs.getStorageDevice());
    FileController controller = getController(FileController.class, device.getSystemType());
    try {
        controller.deleteQuotaDirectory(device.getId(), quotaDirectory.getId(), fs.getId(), task);
    } catch (InternalException e) {
        throw e;
    }
    auditOp(OperationTypeEnum.DELETE_FILE_SYSTEM_QUOTA_DIR, true, AuditLogManager.AUDITOP_BEGIN,
            quotaDirectory.getLabel(), quotaDirectory.getId().toString(), fs.getId().toString());
    fs = _dbClient.queryObject(FileShare.class, fs.getId());
    _log.debug("FileService::Quota directory Before sending response, FS ID : {}, Task : {} ; Status {}",
            fs.getId(), task, fs.getOpStatus().get(task).getStatus());
    return toTask(quotaDirectory, task, op);
}
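The OpStatusMap bookkeeping here (build an Operation, register it under the task id on both the quota directory and its parent file system, then persist) is the same pattern the other examples on this page follow. Below is a minimal, hypothetical helper that isolates just that pattern; the helper name is an assumption, while every call in its body appears in the snippet above.

// Hypothetical helper, not part of CoprHD: records a pending Operation for a task on a FileShare,
// initializing its OpStatusMap if the object has never carried one.
private void attachPendingTask(DbClient dbClient, FileShare fs, String task, ResourceOperationTypeEnum opType) {
    Operation op = new Operation();
    op.setResourceType(opType);
    if (fs.getOpStatus() == null) {
        fs.setOpStatus(new OpStatusMap());
    }
    fs.getOpStatus().createTaskStatus(task, op);
    dbClient.persistObject(fs);
}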
Use of com.emc.storageos.db.client.model.OpStatusMap in project coprhd-controller by CoprHD.
Class WorkflowService, method initTaskStatus.
/**
 * Convenience method for initializing a task object with a status.
 *
 * @param dbClient database client handle
 * @param workflow the Workflow object
 * @param task task ID
 * @param status status to initialize with
 * @param opType operation type
 * @return operation object
 */
protected static Operation initTaskStatus(DbClient dbClient, Workflow workflow, String task,
        Operation.Status status, ResourceOperationTypeEnum opType) {
    if (workflow.getOpStatus() == null) {
        workflow.setOpStatus(new OpStatusMap());
    }
    Operation op = new Operation();
    op.setResourceType(opType);
    if (status == Operation.Status.ready) {
        op.ready();
    }
    dbClient.createTaskOpStatus(Workflow.class, workflow.getId(), task, op);
    return op;
}
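A sketch of how a caller might use this helper is shown below, assuming a Workflow instance and a DbClient are already in scope; the task id and the reuse of CREATE_VOLUME_SNAPSHOT as the operation type are placeholders chosen only to keep the example self-contained.

// Hypothetical call site -- workflow and dbClient are assumed to exist in the surrounding scope.
String taskId = UUID.randomUUID().toString();
Operation op = initTaskStatus(dbClient, workflow, taskId,
        Operation.Status.ready, ResourceOperationTypeEnum.CREATE_VOLUME_SNAPSHOT);
// Because the status was ready, op.ready() was invoked and createTaskOpStatus persisted it for the workflow.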
Use of com.emc.storageos.db.client.model.OpStatusMap in project coprhd-controller by CoprHD.
Class BlockVolumeIngestOrchestrator, method ingestBlockObjects.
@Override
protected <T extends BlockObject> T ingestBlockObjects(IngestionRequestContext requestContext, Class<T> clazz)
        throws IngestionException {
    UnManagedVolume unManagedVolume = requestContext.getCurrentUnmanagedVolume();
    boolean unManagedVolumeExported = requestContext.getVolumeContext().isVolumeExported();
    Volume volume = null;
    List<BlockSnapshotSession> snapSessions = new ArrayList<BlockSnapshotSession>();
    URI unManagedVolumeUri = unManagedVolume.getId();
    String volumeNativeGuid = unManagedVolume.getNativeGuid().replace(
            VolumeIngestionUtil.UNMANAGEDVOLUME, VolumeIngestionUtil.VOLUME);
    volume = VolumeIngestionUtil.checkIfVolumeExistsInDB(volumeNativeGuid, _dbClient);
    // Check if the ingested volume has export masks pending ingestion.
    if (isExportIngestionPending(volume, unManagedVolumeUri, unManagedVolumeExported)) {
        return clazz.cast(volume);
    }
    if (null == volume) {
        validateUnManagedVolume(unManagedVolume, requestContext.getVpool(unManagedVolume));
        // @TODO Need to revisit this. In the 8.x Provider, a ReplicationGroup is automatically created
        // when a volume is associated with a StorageGroup.
        // checkUnManagedVolumeAddedToCG(unManagedVolume, virtualArray, tenant, project, vPool);
        checkVolumeExportState(unManagedVolume, unManagedVolumeExported);
        checkVPoolValidForExportInitiatorProtocols(requestContext.getVpool(unManagedVolume), unManagedVolume);
        checkHostIOLimits(requestContext.getVpool(unManagedVolume), unManagedVolume, unManagedVolumeExported);
        StoragePool pool = validateAndReturnStoragePoolInVAarray(unManagedVolume, requestContext.getVarray(unManagedVolume));
        // Validate whether resource limits are exceeded for the storage system and pool.
        checkSystemResourceLimitsExceeded(requestContext.getStorageSystem(), unManagedVolume,
                requestContext.getExhaustedStorageSystems());
        checkPoolResourceLimitsExceeded(requestContext.getStorageSystem(), pool, unManagedVolume,
                requestContext.getExhaustedPools());
        String autoTierPolicyId = getAutoTierPolicy(unManagedVolume, requestContext.getStorageSystem(),
                requestContext.getVpool(unManagedVolume));
        validateAutoTierPolicy(autoTierPolicyId, unManagedVolume, requestContext.getVpool(unManagedVolume));
        volume = createVolume(requestContext, volumeNativeGuid, pool, unManagedVolume, autoTierPolicyId);
    }
    if (volume != null) {
        String syncActive = PropertySetterUtil.extractValueFromStringSet(
                SupportedVolumeInformation.IS_SYNC_ACTIVE.toString(), unManagedVolume.getVolumeInformation());
        boolean isSyncActive = (null != syncActive) ? Boolean.parseBoolean(syncActive) : false;
        volume.setSyncActive(isSyncActive);
        if (VolumeIngestionUtil.isFullCopy(unManagedVolume)) {
            _logger.info("Setting clone related properties {}", unManagedVolume.getId());
            String replicaState = PropertySetterUtil.extractValueFromStringSet(
                    SupportedVolumeInformation.REPLICA_STATE.toString(), unManagedVolume.getVolumeInformation());
            volume.setReplicaState(replicaState);
            String replicationGroupName = PropertySetterUtil.extractValueFromStringSet(
                    SupportedVolumeInformation.FULL_COPY_CONSISTENCY_GROUP_NAME.toString(),
                    unManagedVolume.getVolumeInformation());
            if (replicationGroupName != null && !replicationGroupName.isEmpty()) {
                volume.setReplicationGroupInstance(replicationGroupName);
            }
        }
        // Create snapshot sessions for each synchronization aspect for the volume.
        StringSet syncAspectInfoForVolume = PropertySetterUtil.extractValuesFromStringSet(
                SupportedVolumeInformation.SNAPSHOT_SESSIONS.toString(), unManagedVolume.getVolumeInformation());
        if ((syncAspectInfoForVolume != null) && (!syncAspectInfoForVolume.isEmpty())) {
            Project project = requestContext.getProject();
            // If this is a VPLEX backend volume, the front-end project should be set as the snapshot session's project.
            if (requestContext instanceof VplexVolumeIngestionContext
                    && VolumeIngestionUtil.isVplexBackendVolume(unManagedVolume)) {
                project = ((VplexVolumeIngestionContext) requestContext).getFrontendProject();
            }
            for (String syncAspectInfo : syncAspectInfoForVolume) {
                String[] syncAspectInfoComponents = syncAspectInfo.split(":");
                String syncAspectName = syncAspectInfoComponents[0];
                String syncAspectObjPath = syncAspectInfoComponents[1];
                // Make sure it is not already created.
                URIQueryResultList queryResults = new URIQueryResultList();
                _dbClient.queryByConstraint(
                        AlternateIdConstraint.Factory.getBlockSnapshotSessionBySessionInstance(syncAspectObjPath),
                        queryResults);
                Iterator<URI> queryResultsIter = queryResults.iterator();
                if (!queryResultsIter.hasNext()) {
                    BlockSnapshotSession session = new BlockSnapshotSession();
                    session.setId(URIUtil.createId(BlockSnapshotSession.class));
                    session.setLabel(syncAspectName);
                    session.setSessionLabel(syncAspectName);
                    session.setParent(new NamedURI(volume.getId(), volume.getLabel()));
                    session.setProject(new NamedURI(project.getId(), project.getLabel()));
                    session.setStorageController(volume.getStorageController());
                    session.setSessionInstance(syncAspectObjPath);
                    StringSet linkedTargetURIs = new StringSet();
                    URIQueryResultList snapshotQueryResults = new URIQueryResultList();
                    _dbClient.queryByConstraint(
                            AlternateIdConstraint.Factory.getBlockSnapshotBySettingsInstance(syncAspectObjPath),
                            snapshotQueryResults);
                    Iterator<URI> snapshotQueryResultsIter = snapshotQueryResults.iterator();
                    while (snapshotQueryResultsIter.hasNext()) {
                        linkedTargetURIs.add(snapshotQueryResultsIter.next().toString());
                    }
                    session.setLinkedTargets(linkedTargetURIs);
                    session.setOpStatus(new OpStatusMap());
                    snapSessions.add(session);
                }
            }
            if (!snapSessions.isEmpty()) {
                _dbClient.createObject(snapSessions);
            }
        }
    }
    // Note that a VPLEX backend volume can also be a snapshot target volume.
    // When the VPLEX ingest orchestrator is executed, it gets the ingestion
    // strategy for the backend volume and executes it. If the backend volume
    // is both a snapshot and a VPLEX backend volume, this local volume ingest
    // strategy is invoked and a Volume instance will result. That is fine because
    // we need to represent that VPLEX backend volume. However, we also need a
    // BlockSnapshot instance to represent the snapshot target volume. Therefore,
    // if the unmanaged volume is also a snapshot target volume, we get and
    // execute the local snapshot ingest strategy to create this BlockSnapshot
    // instance and we add it to the created object list. Note that since the
    // BlockSnapshot is added to the created objects list and the Volume and
    // BlockSnapshot instance will have the same native GUID, we must be careful
    // about adding the Volume to the created object list in the VPLEX ingestion
    // strategy.
    BlockObject snapshot = null;
    if (VolumeIngestionUtil.isSnapshot(unManagedVolume)) {
        String strategyKey = ReplicationStrategy.LOCAL.name() + "_" + VolumeType.SNAPSHOT.name();
        IngestStrategy ingestStrategy = ingestStrategyFactory.getIngestStrategy(
                IngestStrategyEnum.getIngestStrategy(strategyKey));
        snapshot = ingestStrategy.ingestBlockObjects(requestContext, BlockSnapshot.class);
        requestContext.getBlockObjectsToBeCreatedMap().put(snapshot.getNativeGuid(), snapshot);
    }
    // Run this always when the volume is NO_PUBLIC_ACCESS.
    if (markUnManagedVolumeInactive(requestContext, volume)) {
        _logger.info("All the related replicas and the parent of unManagedVolume {} have been ingested",
                unManagedVolume.getNativeGuid());
        // RP masks.
        if (!unManagedVolumeExported && !VolumeIngestionUtil.checkUnManagedResourceIsRecoverPointEnabled(unManagedVolume)) {
            unManagedVolume.setInactive(true);
            requestContext.getUnManagedVolumesToBeDeleted().add(unManagedVolume);
        }
    } else if (volume != null) {
        _logger.info("Not all the parent/replicas of unManagedVolume {} have been ingested, hence marking it as internal",
                unManagedVolume.getNativeGuid());
        volume.addInternalFlags(INTERNAL_VOLUME_FLAGS);
        for (BlockSnapshotSession snapSession : snapSessions) {
            snapSession.addInternalFlags(INTERNAL_VOLUME_FLAGS);
        }
        _dbClient.updateObject(snapSessions);
    }
    return clazz.cast(volume);
}
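The only OpStatusMap usage in this method is session.setOpStatus(new OpStatusMap()), which gives each newly built BlockSnapshotSession an empty status map before it is persisted. The duplicate check guarding that creation reads more easily on its own; the sketch below pulls it into a hypothetical helper (the method name is an assumption, while the constraint and query calls are taken verbatim from the snippet).

// Hypothetical extraction of the per-sync-aspect duplicate check used above.
private boolean snapSessionAlreadyExists(DbClient dbClient, String syncAspectObjPath) {
    URIQueryResultList queryResults = new URIQueryResultList();
    dbClient.queryByConstraint(
            AlternateIdConstraint.Factory.getBlockSnapshotSessionBySessionInstance(syncAspectObjPath),
            queryResults);
    // Any hit means a BlockSnapshotSession for this synchronization aspect was already ingested.
    return queryResults.iterator().hasNext();
}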
Use of com.emc.storageos.db.client.model.OpStatusMap in project coprhd-controller by CoprHD.
Class BlockVplexVolumeIngestOrchestrator, method findOrCreateExportGroup.
/**
 * Find or create an ExportGroup.
 *
 * @param requestContext -- the ingestion request context, which supplies the VPLEX StorageSystem
 * @param array -- Array StorageSystem
 * @param initiators -- Collection<Initiator> representing VPLEX back-end ports
 * @param virtualArrayURI -- the VirtualArray URI
 * @param projectURI -- the Project URI
 * @param tenantURI -- the Tenant URI
 * @param numPaths -- Value of maxPaths to be put in the ExportGroup
 * @param unmanagedExportMask -- the unmanaged export mask
 * @return existing or newly created ExportGroup (not yet persisted)
 */
private ExportGroup findOrCreateExportGroup(IngestionRequestContext requestContext, StorageSystem array,
        Collection<Initiator> initiators, URI virtualArrayURI, URI projectURI, URI tenantURI,
        int numPaths, UnManagedExportMask unmanagedExportMask) {
    StorageSystem vplex = requestContext.getStorageSystem();
    String arrayName = array.getSystemType().replace("block", "")
            + array.getSerialNumber().substring(array.getSerialNumber().length() - 4);
    String groupName = unmanagedExportMask.getMaskName() + "_" + arrayName;
    ExportGroup exportGroup = requestContext.findExportGroup(groupName, projectURI, virtualArrayURI, null, null);
    if (null != exportGroup) {
        _logger.info(String.format("Returning existing ExportGroup %s", exportGroup.getLabel()));
        return exportGroup;
    }
    List<ExportGroup> exportGroups = CustomQueryUtility.queryActiveResourcesByConstraint(_dbClient,
            ExportGroup.class, PrefixConstraint.Factory.getFullMatchConstraint(ExportGroup.class, "label", groupName));
    if (null != exportGroups && !exportGroups.isEmpty()) {
        for (ExportGroup group : exportGroups) {
            if (null != group) {
                _logger.info(String.format("Returning existing ExportGroup %s", group.getLabel()));
                exportGroup = group;
            }
        }
    } else {
        Map<String, ExportGroup> possibleExportGroups = new HashMap<String, ExportGroup>();
        Set<String> initiatorUris = new HashSet<String>();
        for (Initiator initiator : initiators) {
            // Determine all the possible existing ExportGroups.
            List<ExportGroup> groups = ExportUtils.getInitiatorExportGroups(initiator, _dbClient);
            for (ExportGroup group : groups) {
                if (!possibleExportGroups.containsKey(group.getId().toString())) {
                    possibleExportGroups.put(group.getId().toString(), group);
                }
            }
            initiatorUris.add(initiator.getId().toString());
        }
        // If there are possible ExportGroups, look for one that matches on initiators, varray, project, and tenant.
        for (ExportGroup group : possibleExportGroups.values()) {
            if (URIUtil.identical(group.getVirtualArray(), virtualArrayURI)
                    && URIUtil.identical(group.getProject().getURI(), projectURI)
                    && URIUtil.identical(group.getTenant().getURI(), tenantURI)) {
                if (group.getInitiators().containsAll(initiatorUris)) {
                    _logger.info(String.format("Returning existing ExportGroup %s from database.", group.getLabel()));
                    return group;
                }
            }
        }
        // No existing group has the mask; create one.
        exportGroup = new ExportGroup();
        exportGroup.setLabel(groupName);
        exportGroup.setProject(new NamedURI(projectURI, exportGroup.getLabel()));
        exportGroup.setVirtualArray(vplex.getVirtualArray());
        exportGroup.setTenant(new NamedURI(tenantURI, exportGroup.getLabel()));
        exportGroup.setGeneratedName(groupName);
        exportGroup.setVolumes(new StringMap());
        exportGroup.setOpStatus(new OpStatusMap());
        exportGroup.setVirtualArray(virtualArrayURI);
        exportGroup.setNumPaths(numPaths);
        // Add the initiators into the ExportGroup.
        for (Initiator initiator : initiators) {
            exportGroup.addInitiator(initiator);
        }
        _logger.info(String.format("Returning new ExportGroup %s", exportGroup.getLabel()));
    }
    return exportGroup;
}
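As the javadoc notes, a newly created ExportGroup is returned unpersisted, with an empty OpStatusMap ready to hold task status. A hedged sketch of the follow-up a caller might perform is shown below; the CREATE_EXPORT_GROUP operation type and the surrounding variable names are assumptions, while createObject and createTaskOpStatus mirror the DbClient calls used elsewhere on this page.

// Hypothetical follow-up: persist the new group and record a pending task against it.
// ResourceOperationTypeEnum.CREATE_EXPORT_GROUP is assumed here purely for illustration.
String taskId = UUID.randomUUID().toString();
Operation op = new Operation();
op.setResourceType(ResourceOperationTypeEnum.CREATE_EXPORT_GROUP);
_dbClient.createObject(exportGroup);
_dbClient.createTaskOpStatus(ExportGroup.class, exportGroup.getId(), taskId, op);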