Use of com.emc.storageos.db.client.model.OpStatusMap in project coprhd-controller by CoprHD.
From the class RPHelper, method createRPExportGroup.
/**
* Creates an export group with the proper settings for RP usage
*
* @param exportGroupGeneratedName the generated ExportGroup name to use
* @param virtualArray virtual array
* @param project project
* @param numPaths number of paths
* @param isJournalExport flag indicating if this is an ExportGroup intended only for journal volumes
* @return an export group
*/
public static ExportGroup createRPExportGroup(String exportGroupGeneratedName, VirtualArray virtualArray,
        Project project, Integer numPaths, boolean isJournalExport) {
    ExportGroup exportGroup = new ExportGroup();
    exportGroup.setId(URIUtil.createId(ExportGroup.class));
    exportGroup.addInternalFlags(Flag.INTERNAL_OBJECT, Flag.SUPPORTS_FORCE, Flag.RECOVERPOINT);
    exportGroup.setProject(new NamedURI(project.getId(), project.getLabel()));
    exportGroup.setVirtualArray(virtualArray.getId());
    exportGroup.setTenant(new NamedURI(project.getTenantOrg().getURI(), project.getTenantOrg().getName()));
    exportGroup.setGeneratedName(exportGroupGeneratedName);
    // When created by CoprHD natively, it's usually the CG name.
    exportGroup.setLabel(exportGroupGeneratedName);
    exportGroup.setVolumes(new StringMap());
    exportGroup.setOpStatus(new OpStatusMap());
    // TODO: May need to use a default size or compute based on the contents of the export mask.
    exportGroup.setNumPaths(numPaths);
    exportGroup.setType(ExportGroupType.Cluster.name());
    exportGroup.setZoneAllInitiators(true);
    // If this is an exportGroup intended only for journal volumes, set the RECOVERPOINT_JOURNAL flag
    if (isJournalExport) {
        exportGroup.addInternalFlags(Flag.RECOVERPOINT_JOURNAL);
    }
    return exportGroup;
}
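A brief usage sketch follows, assuming a caller with a DbClient handle that wants to persist the new group; the rpCgName, varray, project, and dbClient names are illustrative assumptions rather than project code.

// Sketch only: create a journal-only RP export group with two paths and persist it.
// rpCgName, varray, project and dbClient are assumed to be in scope in the caller.
ExportGroup journalExportGroup = RPHelper.createRPExportGroup(rpCgName + "_journal", varray, project, 2, true);
dbClient.createObject(journalExportGroup);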
Use of com.emc.storageos.db.client.model.OpStatusMap in project coprhd-controller by CoprHD.
From the class BlockDeviceController, method deactivateMirror.
/**
 * An orchestration controller method for detaching and deleting a mirror.
 *
 * @param storage URI of the storage controller
 * @param mirrorList list of URIs of the block mirrors
 * @param promotees list of URIs of the promoted volumes
 * @param isCG whether the mirrors belong to a consistency group (CG)
 * @param opId the operation ID
 * @throws ControllerException
 */
@Override
public void deactivateMirror(URI storage, List<URI> mirrorList, List<URI> promotees, Boolean isCG, String opId)
        throws ControllerException {
    _log.info("deactivateMirror: START");
    TaskCompleter taskCompleter = null;
    String mirrorStr = Joiner.on("\t").join(mirrorList);
    try {
        StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, storage);
        Workflow workflow = _workflowService.getNewWorkflow(this, "deactivateMirror", true, opId);
        taskCompleter = new BlockMirrorDeactivateCompleter(mirrorList, promotees, opId);
        ControllerUtils.checkMirrorConsistencyGroup(mirrorList, _dbClient, taskCompleter);

        String detachStep = workflow.createStepId();
        Workflow.Method detach = detachMirrorMethod(storage, mirrorList, isCG);
        workflow.createStep("deactivate", "detaching mirror volume: " + mirrorStr, null, storage,
                storageSystem.getSystemType(), getClass(), detach, null, detachStep);

        // for single volume mirror, the mirror will be deleted
        List<URI> mirrorsToDelete = mirrorList;
        // for group mirror, find mirrors to be deleted and mirrors to be promoted, and do the promotion
        if (isCG) {
            mirrorsToDelete = new ArrayList<URI>();
            List<Volume> promotedVolumes = _dbClient.queryObject(Volume.class, promotees);
            List<URI> orderedMirrorsToPromote = new ArrayList<URI>();
            List<URI> orderedPromotedVolumes = new ArrayList<URI>();
            for (URI mirror : mirrorList) {
                URI promotedVolume = null;
                for (Volume promotee : promotedVolumes) {
                    OpStatusMap statusMap = promotee.getOpStatus();
                    for (Map.Entry<String, Operation> entry : statusMap.entrySet()) {
                        Operation operation = entry.getValue();
                        if (operation.getAssociatedResourcesField().contains(mirror.toString())) {
                            promotedVolume = promotee.getId();
                        }
                    }
                }
                if (promotedVolume != null) {
                    orderedMirrorsToPromote.add(mirror);
                    orderedPromotedVolumes.add(promotedVolume);
                } else {
                    mirrorsToDelete.add(mirror);
                }
            }
            if (!orderedMirrorsToPromote.isEmpty()) {
                // Create a step for promoting the mirrors.
                String stepId = workflow.createStep(PROMOTE_MIRROR_STEP_GROUP,
                        String.format("Promote mirrors : %s", Joiner.on("\t").join(orderedMirrorsToPromote)),
                        detachStep, storage, storageSystem.getSystemType(), this.getClass(),
                        promoteMirrorMethod(orderedMirrorsToPromote, orderedPromotedVolumes, isCG), null, null);
            }
        }

        String deleteStep = workflow.createStepId();
        Workflow.Method delete = deleteMirrorMethod(storage, mirrorsToDelete, isCG);
        workflow.createStep("deactivate", "deleting mirror volume: " + Joiner.on("\t").join(mirrorsToDelete),
                detachStep, storage, storageSystem.getSystemType(), getClass(), delete, null, deleteStep);

        String successMessage = String.format("Successfully deactivated mirror %s on StorageArray %s", mirrorStr, storage);
        workflow.executePlan(taskCompleter, successMessage);
    } catch (Exception e) {
        if (_log.isErrorEnabled()) {
            String msg = String.format("Deactivate mirror failed for mirror %s", mirrorStr);
            _log.error(msg);
        }
        if (taskCompleter != null) {
            String opName = ResourceOperationTypeEnum.DEACTIVATE_VOLUME_MIRROR.getName();
            ServiceError serviceError = DeviceControllerException.errors.jobFailedOp(opName);
            taskCompleter.error(_dbClient, serviceError);
        } else {
            throw DeviceControllerException.exceptions.deactivateMirrorFailed(e);
        }
    }
}
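For reference, the detach, promote, and delete steps above are bound through Workflow.Method descriptors. The snippet below is a minimal sketch of what a helper such as detachMirrorMethod is assumed to look like, following the usual CoprHD convention of wrapping a step method name and its arguments; it is not copied from the project source.

// Sketch only, assuming the conventional Workflow.Method(name, args...) wrapper used by CoprHD controllers.
// The "detachMirror" step method referenced here is assumed to exist on this controller.
public Workflow.Method detachMirrorMethod(URI storage, List<URI> mirrorList, Boolean isCG) {
    return new Workflow.Method("detachMirror", storage, mirrorList, isCG);
}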
Use of com.emc.storageos.db.client.model.OpStatusMap in project coprhd-controller by CoprHD.
From the class ControllerUtils, method isOperationInProgress.
/**
 * This function looks first at the logical pools and updates them with physical
 * capacity information, then updates the physical pools.
 * If physical pools are removed from the storage system it marks them inactive.
 *
 * @param storage
 * @param physicalHardware
 * @return
 *
 * public static boolean reconcilePhysicalHardware(URI storage,
 *         List<Object> physicalHardware,
 *         DbClient dbClient) {
 *     Logger log = LoggerFactory.getLogger(ControllerUtils.class);
 *     try {
 *
 *         // First update the logical pools represented by the physical pool
 *
 *         List<URI> poolURIs = dbClient.queryByConstraint(ContainmentConstraint.Factory
 *                 .getStorageDeviceStoragePoolConstraint(storage));
 *         List<StoragePool> pools = dbClient.queryObject(StoragePool.class, poolURIs);
 *         boolean poolFound;
 *         for (StoragePool pool : pools) {
 *             poolFound = false;
 *             for (Object obj : physicalHardware) {
 *                 if (obj instanceof PhysicalStoragePool) {
 *                     // the type and ID must match
 *                     PhysicalStoragePool psp = (PhysicalStoragePool) obj;
 *                     if (pool.getControllerParams().get(StoragePool.ControllerParam.NativeId.name()).equals(psp.getNativeId()) &&
 *                             pool.getControllerParams().get(StoragePool.ControllerParam.PoolType.name()).equals(psp.getType())) {
 *                         pool.setFreeCapacity(psp.getFreeCapacity());
 *                         pool.setTotalCapacity(psp.getTotalCapacity());
 *                         pool.setLargestContiguousBlock(psp.getLargestContiguousBlock());
 *                         pool.setSubscribedCapacity(psp.getSubscribedCapacity());
 *                         log.info(String.format("Logical pool %1$s updated by " +
 *                                 "physical storage pool %2$s/%3$s",
 *                                 pool.getId().toString(),
 *                                 psp.getType(), psp.getNativeId()));
 *                         dbClient.persistObject(pool);
 *                         poolFound = true;
 *                         break;
 *                     }
 *                 }
 *             }
 *             if (poolFound == false) {
 *                 // probably a good indication this pool is not valid
 *                 // pool.setInactive(true);
 *                 // dbClient.persistObject(pool);
 *                 log.warn(String.format("Logical pool %1$s not found on storage system",
 *                         pool.getId().toString()));
 *             }
 *         }
 *
 *         // Now update the physical pools obtained from controller
 *
 *         poolURIs = dbClient.queryByConstraint(ContainmentConstraint.Factory
 *                 .getStorageDevicePhysicalPoolConstraint(storage));
 *         List<PhysicalStoragePool> physicalPools = dbClient.queryObject(PhysicalStoragePool.class, poolURIs);
 *         Map<URI, PhysicalStoragePool> newPools = new HashMap<URI, PhysicalStoragePool>();
 *         // save the set of physical pools so we can tell if there are new ones
 *         for (Object obj : physicalHardware) {
 *             if (obj instanceof PhysicalStoragePool) {
 *                 PhysicalStoragePool psp = (PhysicalStoragePool) obj;
 *                 psp.setId(URIUtil.createId(PhysicalStoragePool.class));
 *                 psp.setInactive(false);
 *                 psp.setStorageDevice(storage);
 *                 newPools.put(psp.getId(), psp);
 *             }
 *         }
 *         for (PhysicalStoragePool pool : physicalPools) {
 *             poolFound = false;
 *             for (Object obj : physicalHardware) {
 *                 if (obj instanceof PhysicalStoragePool) {
 *                     PhysicalStoragePool psp = (PhysicalStoragePool) obj;
 *                     // native ID and type must match
 *                     if (pool.getNativeId().equals(psp.getNativeId()) &&
 *                             pool.getType().equals(psp.getType())) {
 *                         newPools.remove(psp.getId());
 *                         psp.setId(pool.getId());
 *                         log.info(String.format("Updated physical storage pool %1$s/%2$s:%3$s %4$s",
 *                                 psp.getType(), psp.getNativeId(),
 *                                 pool.getId().toString(),
 *                                 pool.getLabel()));
 *                         dbClient.persistObject(psp);
 *                         poolFound = true;
 *                         break;
 *                     }
 *                 }
 *             }
 *             if (poolFound == false) {
 *                 // this pool is no longer on array
 *                 log.info(String.format("Inactivated Pool %1$s", pool.getId()));
 *                 dbClient.markForDeletion(pool);
 *             }
 *         }
 *
 *         // add new pools
 *         Iterator<Map.Entry<URI, PhysicalStoragePool>> itr = newPools.entrySet().iterator();
 *         while (itr.hasNext()) {
 *             Map.Entry<URI, PhysicalStoragePool> entry = itr.next();
 *             PhysicalStoragePool psp = entry.getValue();
 *             log.info(String.format("New physical storage pool %1$s/%2$s:%3$s %4$s",
 *                     psp.getType(), psp.getNativeId(),
 *                     psp.getId().toString(),
 *                     psp.getLabel()));
 *             dbClient.persistObject(psp);
 *         }
 *         return true;
 *     } catch (IOException e) {
 *         log.error("Exception while trying to handle results from " +
 *                 "getPhysicalInventory", e);
 *     }
 *     return false;
 * }
 */
/**
 * Returns true if any operation other than the given opId is still pending on the resource.
 *
 * @param id the id of the resource
 * @param opId the operation id of the current operation
 * @param resource the resource instance
 * @return true if another operation is pending, false otherwise
 */
public static boolean isOperationInProgress(URI id, String opId, DataObject resource) {
    OpStatusMap ops = resource.getOpStatus();
    Set<Map.Entry<String, Operation>> opSet = ops.entrySet();
    Iterator<Map.Entry<String, Operation>> opItr = opSet.iterator();
    while (opItr.hasNext()) {
        Map.Entry<String, Operation> entry = opItr.next();
        if (entry.getValue().getStatus().equals(Operation.Status.pending.toString())) {
            if (entry.getKey().equals(opId)) {
                // our own operation; skip it
                continue;
            }
            // some other operation is still pending
            return true;
        }
    }
    return false;
}
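A brief usage sketch: this check is typically used as a guard before starting a new task against a resource. The volume and taskId names and the exception thrown below are illustrative assumptions, not project code.

// Sketch only: refuse to start a new task while another operation is still pending on the volume.
// "volume" and "taskId" are assumed to be in scope; the exception type is illustrative.
if (ControllerUtils.isOperationInProgress(volume.getId(), taskId, volume)) {
    throw new IllegalStateException("Another operation is already pending on volume " + volume.getId());
}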