Usage of com.emc.storageos.db.client.model.Project in the coprhd-controller project (CoprHD): class FileDeviceController, method applyFilePolicy.
@Override
public void applyFilePolicy(URI storage, URI sourceFS, URI policyURI, String taskId) throws ControllerException {
    FileShare fsObj = null;
    StorageSystem storageObj = null;
    try {
        fsObj = _dbClient.queryObject(FileShare.class, sourceFS);
        VirtualPool vpool = _dbClient.queryObject(VirtualPool.class, fsObj.getVirtualPool());
        Project project = _dbClient.queryObject(Project.class, fsObj.getProject());
        TenantOrg tenantOrg = _dbClient.queryObject(TenantOrg.class, project.getTenantOrg());
        storageObj = _dbClient.queryObject(StorageSystem.class, storage);

        // Assemble the device-layer argument object for the apply-policy call.
        FileDeviceInputOutput args = new FileDeviceInputOutput();
        args.setOpId(taskId);
        args.addFSFileObject(fsObj);
        args.setVPool(vpool);
        args.setTenantOrg(tenantOrg);
        args.setProject(project);
        FilePolicy filePolicy = _dbClient.queryObject(FilePolicy.class, policyURI);
        args.setFileProtectionPolicy(filePolicy);
        setVirtualNASinArgs(fsObj.getVirtualNAS(), args);

        WorkflowStepCompleter.stepExecuting(taskId);
        BiosCommandResult result = getDevice(storageObj.getSystemType()).doApplyFilePolicy(storageObj, args);
        if (result.getCommandPending()) {
            // Asynchronous device job; the job poller will complete the step.
            return;
        } else if (result.isCommandSuccess()) {
            _log.info("File policy: {} applied successfully", filePolicy.getFilePolicyName());
            WorkflowStepCompleter.stepSucceded(taskId);
        } else {
            WorkflowStepCompleter.stepFailed(taskId, result.getServiceCoded());
        }
    } catch (Exception e) {
        // fsObj may still be null if the initial FileShare lookup threw;
        // fall back to the caller-supplied URI so the error log never NPEs.
        URI fsId = (fsObj != null) ? fsObj.getId() : sourceFS;
        // Pass the exception as the last argument so SLF4J records the stack trace.
        _log.error("Unable to apply file policy: {} to file system: {}", policyURI, fsId, e);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
        WorkflowStepCompleter.stepFailed(taskId, serviceError);
        if (fsObj != null) {
            updateTaskStatus(taskId, fsObj, e);
        }
    }
}
Usage of com.emc.storageos.db.client.model.Project in the coprhd-controller project (CoprHD): class FileDeviceController, method assignFileReplicationPolicyToProjects.
@Override
public void assignFileReplicationPolicyToProjects(URI storageSystemURI, URI targetSystemURI, URI sourceVNasURI, URI targetVArrayURI, URI targetVNasURI, URI filePolicyToAssign, URI vpoolURI, URI projectURI, String opId) throws InternalException {
    try {
        WorkflowStepCompleter.stepExecuting(opId);
        StorageSystem sourceSystem = _dbClient.queryObject(StorageSystem.class, storageSystemURI);
        StorageSystem targetSystem = _dbClient.queryObject(StorageSystem.class, targetSystemURI);
        FilePolicy filePolicy = _dbClient.queryObject(FilePolicy.class, filePolicyToAssign);
        VirtualPool vpool = _dbClient.queryObject(VirtualPool.class, vpoolURI);
        Project project = _dbClient.queryObject(Project.class, projectURI);
        TenantOrg tenant = _dbClient.queryObject(TenantOrg.class, project.getTenantOrg());
        VirtualArray targetVarray = _dbClient.queryObject(VirtualArray.class, targetVArrayURI);

        VirtualNAS sourceVNAS = null;
        VirtualNAS targetVNAS = null;

        // Build the source-side argument object.
        FileDeviceInputOutput sourceArgs = new FileDeviceInputOutput();
        sourceArgs.setFileProtectionPolicy(filePolicy);
        sourceArgs.setVPool(vpool);
        sourceArgs.setProject(project);
        sourceArgs.setTenantOrg(tenant);

        // Build the target-side argument object.
        FileDeviceInputOutput targetArgs = new FileDeviceInputOutput();
        targetArgs.setVarray(targetVarray);
        if (sourceVNasURI != null) {
            sourceVNAS = _dbClient.queryObject(VirtualNAS.class, sourceVNasURI);
            sourceArgs.setvNAS(sourceVNAS);
            targetArgs.setSourceVNAS(sourceVNAS);
        }
        targetArgs.setTarget(true);
        targetArgs.setSourceSystem(sourceSystem);
        targetArgs.setVPool(vpool);
        targetArgs.setProject(project);
        targetArgs.setTenantOrg(tenant);
        if (targetVNasURI != null) {
            targetVNAS = _dbClient.queryObject(VirtualNAS.class, targetVNasURI);
            targetArgs.setvNAS(targetVNAS);
        }

        _log.info("Assigning file snapshot policy: {} to vpool {} and project: {}", filePolicyToAssign, vpoolURI, projectURI);
        BiosCommandResult result = getDevice(sourceSystem.getSystemType()).checkFileReplicationPolicyExistsOrCreate(sourceSystem, targetSystem, sourceArgs, targetArgs);
        if (result.getCommandPending()) {
            // Asynchronous device job; the job poller will complete the step.
            return;
        }
        // After the pending early-return, pending is known false, so the original
        // "!isCommandSuccess() && !getCommandPending()" guard collapses to a plain if/else.
        if (result.isCommandSuccess()) {
            WorkflowStepCompleter.stepSucceded(opId);
        } else {
            WorkflowStepCompleter.stepFailed(opId, result.getServiceCoded());
        }
    } catch (Exception e) {
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
        WorkflowStepCompleter.stepFailed(opId, serviceError);
    }
}
Usage of com.emc.storageos.db.client.model.Project in the coprhd-controller project (CoprHD): class IsilonFileStorageDevice, method updateLocalTargetFileSystemPath.
/**
 * Rewrites the file-system path fields in {@code args} for a local-replication
 * target file system so the target path differs from the source path.
 * <p>
 * If the file share is a replication TARGET whose single matching replication
 * policy is of type LOCAL, a "-localTarget" style path is generated; if the
 * share's label already contains "-localTarget", the path is derived from the
 * native id. In both cases the mount path, native GUID, native id and fs path
 * in {@code args} are overwritten. Otherwise {@code args} is left untouched.
 *
 * @param system the storage system hosting the file system (currently unused here)
 * @param args device input/output holder carrying the vpool, project and file share;
 *             mutated in place with the recomputed paths
 */
public void updateLocalTargetFileSystemPath(StorageSystem system, FileDeviceInputOutput args) {
    VirtualPool vpool = args.getVPool();
    Project project = args.getProject();
    FileShare fs = args.getFs();
    if (fs.getPersonality() != null && fs.getPersonality().equalsIgnoreCase(PersonalityTypes.TARGET.name())) {
        List<FilePolicy> replicationPolicies = FileOrchestrationUtils.getReplicationPolices(_dbClient, vpool, project, null);
        if (replicationPolicies != null && !replicationPolicies.isEmpty()) {
            if (replicationPolicies.size() > 1) {
                // Ambiguous: multiple policies match; do not guess which path to generate.
                _log.warn("More than one replication policy found {}", replicationPolicies.size());
            } else {
                FilePolicy replPolicy = replicationPolicies.get(0);
                if (replPolicy.getFileReplicationType().equalsIgnoreCase(FileReplicationType.LOCAL.name())) {
                    // For local replication, the path should be different
                    // add localTaget to file path at directory level where the policy is applied!!!
                    String mountPath = generatePathForLocalTarget(replPolicy, fs, args);
                    // replace extra forward slash with single one
                    mountPath = mountPath.replaceAll("/+", "/");
                    _log.info("Mount path to mount the Isilon File System {}", mountPath);
                    args.setFsMountPath(mountPath);
                    args.setFsNativeGuid(args.getFsMountPath());
                    args.setFsNativeId(args.getFsMountPath());
                    args.setFsPath(args.getFsMountPath());
                }
            }
        } else if (fs.getLabel().contains("-localTarget")) {
            // No policy found but the label marks this as a local target: derive the
            // path directly from the native id.
            String mountPath = fs.getNativeId() + "_localTarget";
            // replace extra forward slash with single one
            mountPath = mountPath.replaceAll("/+", "/");
            _log.info("Mount path to mount the Isilon File System {}", mountPath);
            args.setFsMountPath(mountPath);
            args.setFsNativeGuid(args.getFsMountPath());
            args.setFsNativeId(args.getFsMountPath());
            args.setFsPath(args.getFsMountPath());
        }
    }
}
Usage of com.emc.storageos.db.client.model.Project in the coprhd-controller project (CoprHD): class SmisBlockSnapshotSessionUnlinkTargetJob, method getBlockConsistencyGroupForPromotedSnapshot.
/**
 * Given a CG snapshot that is to be promoted, create a new BlockConsistencyGroup based on its
 * ReplicationGroup. A group cache parameter is accepted and serves to cache BlockConsistencyGroup
 * instances that have already been created.
 *
 * @param snapshot BlockSnapshot being promoted.
 * @param groupCache Cache mapping of ReplicationGroup name to BlockConsistencyGroup instances.
 * @param dbClient Database client.
 * @return BlockConsistencyGroup URI or null.
 */
private URI getBlockConsistencyGroupForPromotedSnapshot(BlockSnapshot snapshot, Map<String, BlockConsistencyGroup> groupCache, DbClient dbClient) {
    if (!snapshot.hasConsistencyGroup()) {
        return null;
    }
    // Create new BlockConsistencyGroup instances to track the existing target groups.
    String groupInstance = snapshot.getReplicationGroupInstance();
    BlockConsistencyGroup cg = null;
    if (groupCache.containsKey(groupInstance)) {
        cg = groupCache.get(groupInstance);
    } else {
        cg = new BlockConsistencyGroup();
        cg.setId(URIUtil.createId(BlockConsistencyGroup.class));
        // Inherit project and tenant from the snapshot being promoted.
        Project project = dbClient.queryObject(Project.class, snapshot.getProject().getURI());
        cg.setProject(new NamedURI(project.getId(), project.getLabel()));
        cg.setTenant(new NamedURI(project.getTenantOrg().getURI(), project.getTenantOrg().getName()));
        // The replication group name is everything after the first '+'
        // (single-arg substring; the explicit end index was redundant).
        String repGrpName = groupInstance.substring(groupInstance.indexOf("+") + 1);
        cg.setLabel(repGrpName);
        StringSetMap map = new StringSetMap();
        map.put(snapshot.getStorageController().toString(), new StringSet(Arrays.asList(repGrpName)));
        cg.setSystemConsistencyGroups(map);
        StringSet types = new StringSet();
        types.add(BlockConsistencyGroup.Types.LOCAL.name());
        cg.setTypes(types);
        cg.setRequestedTypes(types);
        cg.setStorageController(snapshot.getStorageController());
        s_logger.info("Creating new BlockConsistencyGroup for ReplicationGroup {} with ID: {}", groupInstance, cg.getId());
        dbClient.createObject(cg);
        // Cache so subsequent snapshots from the same replication group reuse this CG.
        groupCache.put(groupInstance, cg);
    }
    return cg.getId();
}
Usage of com.emc.storageos.db.client.model.Project in the coprhd-controller project (CoprHD): class VPlexDeviceController, method addStepsForCreateVolumes.
/**
 * {@inheritDoc}
 * <p>
 * Here we should have already created any underlying volumes. What remains to be done: 1. Export the underlying Storage Volumes from
 * the array to the VPlex. 2. Create the Virtual volume. 3. If a consistency group was specified, then create the consistency group if
 * it does not exist, then add the volumes. If it already exists, just add the volumes.
 */
@Override
public String addStepsForCreateVolumes(Workflow workflow, String waitFor, List<VolumeDescriptor> volumes, String taskId) throws ControllerException {
try {
// Get only the VPlex volumes from the descriptors.
List<VolumeDescriptor> vplexVolumes = VolumeDescriptor.filterByType(volumes, new VolumeDescriptor.Type[] { VolumeDescriptor.Type.VPLEX_VIRT_VOLUME }, new VolumeDescriptor.Type[] {});
// If there are no VPlex volumes, just return
if (vplexVolumes.isEmpty()) {
_log.info("No VPLEX create volume steps required.");
return waitFor;
}
_log.info("Adding VPLEX create volume steps...");
// Segregate the volumes by Device.
Map<URI, List<VolumeDescriptor>> vplexDescMap = VolumeDescriptor.getDeviceMap(vplexVolumes);
// For each VPLEX to be provisioned (normally there is only one)
// lastStep is threaded through the loop so steps for successive VPLEX
// systems (and for export -> create-volume -> CG within one system) are chained.
String lastStep = waitFor;
for (URI vplexURI : vplexDescMap.keySet()) {
StorageSystem vplexSystem = getDataObject(StorageSystem.class, vplexURI, _dbClient);
// Build some needed maps to get started.
// Backend volume types considered when mapping arrays/volumes for export.
Type[] types = new Type[] { Type.BLOCK_DATA, Type.SRDF_SOURCE, Type.SRDF_EXISTING_SOURCE, Type.SRDF_TARGET };
Map<URI, StorageSystem> arrayMap = buildArrayMap(vplexSystem, volumes, types);
Map<URI, Volume> volumeMap = buildVolumeMap(vplexSystem, volumes, Type.VPLEX_VIRT_VOLUME);
// Set the project and tenant to those of an underlying volume.
// These are used to set the project and tenant of a new ExportGroup if needed.
Volume firstVolume = volumeMap.values().iterator().next();
Project vplexProject = VPlexUtil.lookupVplexProject(firstVolume, vplexSystem, _dbClient);
URI tenantURI = vplexProject.getTenantOrg().getURI();
_log.info("Project is {}, Tenant is {}", vplexProject.getId(), tenantURI);
try {
// Now we need to do the necessary zoning and export steps to ensure
// the VPlex can see these new backend volumes.
lastStep = createWorkflowStepsForBlockVolumeExport(workflow, vplexSystem, arrayMap, volumeMap, vplexProject.getId(), tenantURI, lastStep);
} catch (Exception ex) {
// Mark the task failed before rethrowing so the caller sees the error state.
_log.error("Could not create volumes for vplex: " + vplexURI, ex);
TaskCompleter completer = new VPlexTaskCompleter(Volume.class, vplexURI, taskId, null);
ServiceError serviceError = VPlexApiException.errors.jobFailed(ex);
completer.error(_dbClient, serviceError);
throw ex;
}
// Map each volume to its compute resource (if any) so the create step
// can place the virtual volume accordingly.
Map<URI, URI> computeResourceMap = new HashMap<>();
List<VolumeDescriptor> vplexDescrs = vplexDescMap.get(vplexURI);
for (VolumeDescriptor descr : vplexDescrs) {
URI computeResourceURI = descr.getComputeResource();
if (computeResourceURI != null) {
computeResourceMap.put(descr.getVolumeURI(), computeResourceURI);
}
}
// Now create each of the Virtual Volumes that may be necessary.
List<URI> vplexVolumeURIs = VolumeDescriptor.getVolumeURIs(vplexDescrs);
// Now make a Step to create the VPlex Virtual volume.
// This will be done from this controller.
String stepId = workflow.createStepId();
lastStep = workflow.createStep(VPLEX_STEP, String.format("VPlex %s creating virtual volumes:%n%s", vplexSystem.getId().toString(), BlockDeviceController.getVolumesMsg(_dbClient, vplexVolumeURIs)), lastStep, vplexURI, vplexSystem.getSystemType(), this.getClass(), createVirtualVolumesMethod(vplexURI, vplexVolumeURIs, computeResourceMap), rollbackCreateVirtualVolumesMethod(vplexURI, vplexVolumeURIs, stepId), stepId);
// Get one of the vplex volumes so we can determine what ConsistencyGroupManager
// implementation to use.
Volume vol = getDataObject(Volume.class, vplexVolumeURIs.get(0), _dbClient);
ConsistencyGroupManager consistencyGroupManager = getConsistencyGroupManager(vol);
// Deal with CGs.
// Filter out any VPlex Volumes that front the SRDF targets for now.
List<URI> volsForCG = VPlexSrdfUtil.filterOutVplexSrdfTargets(_dbClient, vplexVolumeURIs);
if (!volsForCG.isEmpty()) {
lastStep = consistencyGroupManager.addStepsForCreateConsistencyGroup(workflow, lastStep, vplexSystem, volsForCG, false);
}
// If there are VPlex Volumes fronting SRDF targets, handle them.
// They will go into a separate CG that represents the SRDF targets.
// That CG will have already been generated?
volsForCG = VPlexSrdfUtil.returnVplexSrdfTargets(_dbClient, vplexVolumeURIs);
if (!volsForCG.isEmpty()) {
lastStep = consistencyGroupManager.addStepsForAddingVolumesToSRDFTargetCG(workflow, vplexSystem, volsForCG, lastStep);
}
_log.info("Added steps for creating consistency group");
}
// Returns the id of the last step added so callers can chain further steps.
return lastStep;
} catch (Exception ex) {
throw VPlexApiException.exceptions.addStepsForCreateVolumesFailed(ex);
}
}
Aggregations