Use of com.emc.storageos.db.client.model.DiscoveredSystemObject in project coprhd-controller by CoprHD.
In class ExportWorkflowUtils, method generateExportGroupUpdateWorkflow.
/**
 * Creates the workflow for one export mask (storage system) for an update export
 * group call. It creates a single step in the main workflow that wraps a workflow
 * with necessary steps to:
 * <ol>
 * <li>add block objects (volumes/snapshots)</li>
 * <li>remove volumes</li>
 * <li>add initiators</li>
 * <li>remove initiators</li>
 * </ol>
 * The steps are created based on the diff between the current and the requested for
 * the storage system export mask.
 *
 * @param workflow the main workflow
 * @param wfGroupId the workflow group Id, if any
 * @param waitFor the id of a step on which this workflow has to wait, if any
 * @param exportGroupUri the export group being updated
 * @param exportMask the export mask for the storage system; when null and there is
 *            something to add, the export is (re)created in the main workflow
 *            instead of being updated through a sub-workflow
 * @param addedBlockObjects the map of block objects to be added
 * @param removedBlockObjects the map of block objects to be removed
 * @param addedInitiators the new list of initiators to be added
 * @param removedInitiators the new list of initiators to be removed
 * @param blockStorageControllerUri the block storage controller. This will always
 *            be used for adding/removing initiators as we
 *            do not want a protection controller doing this.
 * @param workflowList holds workflow and sub-workflow instances so that all locks
 *            can be released during failure
 * @return the id of the wrapper step that was added to the main workflow, or null
 *         when there is nothing to update
 * @throws IOException
 * @throws WorkflowException
 * @throws WorkflowRestartedException
 */
public String generateExportGroupUpdateWorkflow(Workflow workflow, String wfGroupId, String waitFor, URI exportGroupUri, ExportMask exportMask, Map<URI, Integer> addedBlockObjects, Map<URI, Integer> removedBlockObjects, List<URI> addedInitiators, List<URI> removedInitiators, URI blockStorageControllerUri, List<Workflow> workflowList) throws IOException, WorkflowException, WorkflowRestartedException {
// Filter the addedInitiators for non VPLEX system by the Export Group varray.
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupUri);
addedInitiators = ExportUtils.filterNonVplexInitiatorsByExportGroupVarray(exportGroup, addedInitiators, blockStorageControllerUri, _dbClient);
// Nothing to add or remove for this storage system: no step is created.
if (allCollectionsAreEmpty(addedBlockObjects, removedBlockObjects, addedInitiators, removedInitiators)) {
_log.info(String.format("There is no export updated required for %s", blockStorageControllerUri.toString()));
return null;
}
// We would rather the task be the child stepID of the parent workflow's stepID.
// This helps us to preserve parent/child relationships.
String exportGroupUpdateStepId = workflow.createStepId();
Workflow storageWorkflow = newWorkflow("storageSystemExportGroupUpdate", false, exportGroupUpdateStepId);
// Track the sub-workflow so its locks can be released if the operation fails.
workflowList.add(storageWorkflow);
DiscoveredSystemObject storageSystem = getStorageSystem(_dbClient, blockStorageControllerUri);
String stepId = null;
// We will lock the host/storage system tuples necessary for the workflows.
// There are two possibilities about locking (see below: locks are acquired
// either on the parent workflow or on the sub-workflow). Here we just
// generate the lockKeys, covering both the added initiators and the
// initiators already in the export group.
List<URI> lockedInitiatorURIs = new ArrayList<URI>();
lockedInitiatorURIs.addAll(addedInitiators);
lockedInitiatorURIs.addAll(StringSetUtil.stringSetToUriList(exportGroup.getInitiators()));
List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient, ExportGroup.ExportGroupType.valueOf(exportGroup.getType()), lockedInitiatorURIs, blockStorageControllerUri);
// Generate add/remove initiator steps in the sub-workflow; this also covers
// the case where initiators are getting replaced (an add plus a remove).
if (addedInitiators != null && !addedInitiators.isEmpty()) {
stepId = generateExportGroupAddInitiators(storageWorkflow, null, stepId, exportGroupUri, blockStorageControllerUri, addedInitiators);
}
if (removedInitiators != null && !removedInitiators.isEmpty()) {
stepId = generateExportGroupRemoveInitiators(storageWorkflow, null, stepId, exportGroupUri, blockStorageControllerUri, removedInitiators);
}
// Let the protection controller consume the block objects it owns first; any
// objects it leaves in the map are unexported directly through the block
// controller. NOTE(review): the original comment here was truncated
// ("ends being a problem, we would need to tackle this issue").
if (removedBlockObjects != null && !removedBlockObjects.isEmpty()) {
Map<URI, Integer> objectsToRemove = new HashMap<URI, Integer>(removedBlockObjects);
ProtectionExportController protectionExportController = getProtectionExportController();
stepId = protectionExportController.addStepsForExportGroupRemoveVolumes(storageWorkflow, null, stepId, exportGroupUri, objectsToRemove, blockStorageControllerUri);
if (!objectsToRemove.isEmpty()) {
// Unexport the remaining block objects.
_log.info(String.format("Generating exportGroupRemoveVolumes step for objects %s associated with storage system [%s]", objectsToRemove, blockStorageControllerUri));
List<URI> objectsToRemoveList = new ArrayList<URI>(objectsToRemove.keySet());
stepId = generateExportGroupRemoveVolumes(storageWorkflow, null, stepId, blockStorageControllerUri, exportGroupUri, objectsToRemoveList);
}
}
// Same pattern for additions: protection controller first, then the block
// controller handles whatever remains in the map.
if (addedBlockObjects != null && !addedBlockObjects.isEmpty()) {
Map<URI, Integer> objectsToAdd = new HashMap<URI, Integer>(addedBlockObjects);
ProtectionExportController protectionExportController = getProtectionExportController();
stepId = protectionExportController.addStepsForExportGroupAddVolumes(storageWorkflow, null, stepId, exportGroupUri, objectsToAdd, blockStorageControllerUri);
if (!objectsToAdd.isEmpty()) {
// Export the remaining block objects.
_log.info(String.format("Generating exportGroupAddVolumes step for objects %s associated with storage system [%s]", objectsToAdd.keySet(), blockStorageControllerUri));
stepId = generateExportGroupAddVolumes(storageWorkflow, null, stepId, blockStorageControllerUri, exportGroupUri, objectsToAdd);
}
}
boolean addObject = (addedInitiators != null && !addedInitiators.isEmpty()) || (addedBlockObjects != null && !addedBlockObjects.isEmpty());
// No export mask exists for this storage system but we have something to add:
// recreate the export instead of updating it. The sub-workflow built above is
// abandoned in this path; steps are added to the main workflow directly.
if (exportMask == null && addObject) {
// recreate export mask only for add initiator/volume
if (addedInitiators == null) {
addedInitiators = new ArrayList<URI>();
}
// If only volumes were added, carry over the export group's existing
// initiators so the recreated export has them.
if (addedInitiators.isEmpty()) {
addedInitiators.addAll(getInitiators(exportGroup));
}
// Add block volumes already in the export group
if (exportGroup.getVolumes() != null) {
for (String key : exportGroup.getVolumes().keySet()) {
BlockObject bobject = BlockObject.fetch(_dbClient, URI.create(key));
// Only carry over volumes that live on this storage system.
if (bobject.getStorageController().equals(blockStorageControllerUri)) {
addedBlockObjects.put(URI.create(key), Integer.valueOf(exportGroup.getVolumes().get(key)));
}
}
}
// Acquire locks for the parent workflow.
boolean acquiredLocks = getWorkflowService().acquireWorkflowLocks(workflow, lockKeys, LockTimeoutValue.get(LockType.EXPORT_GROUP_OPS));
if (!acquiredLocks) {
throw DeviceControllerException.exceptions.failedToAcquireLock(lockKeys.toString(), "ExportMaskUpdate: " + exportGroup.getLabel());
}
Map<URI, Integer> objectsToAdd = new HashMap<URI, Integer>(addedBlockObjects);
ProtectionExportController protectionController = getProtectionExportController();
waitFor = protectionController.addStepsForExportGroupCreate(workflow, wfGroupId, waitFor, exportGroupUri, objectsToAdd, blockStorageControllerUri, addedInitiators);
if (!objectsToAdd.isEmpty()) {
// There are no export BlockObjects tied to the current storage system that have an associated protection
// system. We can just create a step to call the block controller directly for export group create.
_log.info(String.format("Generating exportGroupCreate steps for objects %s associated with storage system [%s]", objectsToAdd, blockStorageControllerUri));
// Add the new block objects to the existing ones and send all down
waitFor = generateExportGroupCreateWorkflow(workflow, wfGroupId, waitFor, blockStorageControllerUri, exportGroupUri, addedBlockObjects, addedInitiators);
}
return waitFor;
}
try {
// Acquire locks for the storageWorkflow which is started just below.
boolean acquiredLocks = getWorkflowService().acquireWorkflowLocks(storageWorkflow, lockKeys, LockTimeoutValue.get(LockType.EXPORT_GROUP_OPS));
if (!acquiredLocks) {
throw DeviceControllerException.exceptions.failedToAcquireLock(lockKeys.toString(), "ExportMaskUpdate: " + exportMask.getMaskName());
}
// There will not be a rollback step for the overall update instead
// the code allows the user to retry update as needed.
Workflow.Method method = ExportWorkflowEntryPoints.exportGroupUpdateMethod(blockStorageControllerUri, exportGroupUri, storageWorkflow);
return newWorkflowStep(workflow, wfGroupId, String.format("Updating export (%s) on storage array %s", exportGroupUri, storageSystem.getNativeGuid()), storageSystem, method, null, waitFor, exportGroupUpdateStepId);
} catch (Exception ex) {
// Release the sub-workflow's locks on failure so they are not leaked.
getWorkflowService().releaseAllWorkflowLocks(storageWorkflow);
throw ex;
}
}
Use of com.emc.storageos.db.client.model.DiscoveredSystemObject in project coprhd-controller by CoprHD.
In class DataCollectionJobScheduler, method enqueueJobs.
/**
 * Enqueues each data collection job whose target system is due for collection.
 *
 * For each job: the target system is loaded via the job's completer; if
 * scheduling is needed, the job is scheduled, the last status message is
 * cleared (on the completer for array-affinity jobs, on the system itself
 * otherwise), and the job is enqueued. Jobs that are not due are skipped and,
 * when user-initiated (not scheduler jobs), their task is marked ready with an
 * explanatory message. Any failure while enqueueing marks the task as errored
 * for user-initiated jobs; errors during that status update are only logged.
 *
 * @param jobs the data collection jobs to consider for enqueueing
 */
private void enqueueJobs(List<DataCollectionJob> jobs) {
    for (DataCollectionJob job : jobs) {
        try {
            DataCollectionTaskCompleter taskCompleter = job.getCompleter();
            DiscoveredSystemObject targetSystem =
                    (DiscoveredSystemObject) _dbClient.queryObject(taskCompleter.getType(), taskCompleter.getId());
            if (!isDataCollectionJobSchedulingNeeded(targetSystem, job)) {
                // Not due yet (or already running): skip, and tell the user if this
                // was a user-initiated request.
                _logger.info("Skipping {} Job for {}", job.getType(), taskCompleter.getId());
                if (!job.isSchedulerJob()) {
                    job.setTaskReady(_dbClient, "The discovery for this system is currently running or was run quite recently. Resubmit this request at a later time, if needed.");
                }
                continue;
            }
            job.schedule(_dbClient);
            // Reset the last status message before the job runs.
            if (job instanceof DataCollectionArrayAffinityJob) {
                ((ArrayAffinityDataCollectionTaskCompleter) taskCompleter).setLastStatusMessage(_dbClient, "");
            } else {
                targetSystem.setLastDiscoveryStatusMessage("");
                _dbClient.updateObject(targetSystem);
            }
            ControllerServiceImpl.enqueueDataCollectionJob(job);
        } catch (Exception e) {
            _logger.error("Failed to enqueue {} Job {}", job.getType(), e.getMessage());
            if (!job.isSchedulerJob()) {
                try {
                    job.setTaskError(_dbClient, DeviceControllerErrors.dataCollectionErrors.failedToEnqueue(job.getType(), e));
                } catch (Exception statusEx) {
                    _logger.warn("Exception occurred while updating task status", statusEx);
                }
            }
        }
    }
}
Use of com.emc.storageos.db.client.model.DiscoveredSystemObject in project coprhd-controller by CoprHD.
In class DiscoverTaskCompleter, method updateObjectState.
/**
 * Persists the given discovery job status onto the discovered system object
 * identified by this completer.
 *
 * @param dbClient the database client used to persist the change
 * @param jobStatus the data collection job status to record
 * @throws DeviceControllerException if the completer's type is not a
 *             DiscoveredSystemObject subclass
 * @throws DatabaseException if the tracking instance cannot be created
 */
protected void updateObjectState(DbClient dbClient, DiscoveredDataObject.DataCollectionJobStatus jobStatus) {
    Class type = getType();
    if (!DiscoveredSystemObject.class.isAssignableFrom(type)) {
        throw DeviceControllerException.exceptions.invalidSystemType(type.toString());
    }
    try {
        // createInstance builds a change-tracking stub so only the status
        // field is written back.
        DiscoveredSystemObject dbObject = (DiscoveredSystemObject) DataObject.createInstance(type, getId());
        dbObject.trackChanges();
        dbObject.setDiscoveryStatus(jobStatus.toString());
        dbClient.persistObject(dbObject);
    } catch (InstantiationException | IllegalAccessException ex) {
        // BUG FIX: the original built this exception but never threw it,
        // silently swallowing reflection failures. Propagate it instead.
        throw DatabaseException.fatals.queryFailed(ex);
    }
}
Use of com.emc.storageos.db.client.model.DiscoveredSystemObject in project coprhd-controller by CoprHD.
In class DiscoverTaskCompleter, method setLastTime.
/**
 * Records the time the last discovery run occurred on the discovered system
 * object identified by this completer.
 *
 * @param dbClient the database client used to persist the change
 * @param time the last discovery run time (epoch millis)
 * @throws RuntimeException if the completer's type is not a
 *             DiscoveredSystemObject subclass
 * @throws DatabaseException if the tracking instance cannot be created
 */
@Override
public final void setLastTime(DbClient dbClient, long time) {
    Class type = getType();
    if (!DiscoveredSystemObject.class.isAssignableFrom(type)) {
        throw new RuntimeException("Unsupported system Type : " + type.toString());
    }
    try {
        // createInstance builds a change-tracking stub so only the run-time
        // field is written back.
        DiscoveredSystemObject dbObject = (DiscoveredSystemObject) DataObject.createInstance(type, getId());
        dbObject.trackChanges();
        dbObject.setLastDiscoveryRunTime(time);
        dbClient.persistObject(dbObject);
    } catch (InstantiationException | IllegalAccessException ex) {
        // BUG FIX: the original built this exception but never threw it,
        // silently swallowing reflection failures. Propagate it instead.
        throw DatabaseException.fatals.queryFailed(ex);
    }
}
Use of com.emc.storageos.db.client.model.DiscoveredSystemObject in project coprhd-controller by CoprHD.
In class DiscoverTaskCompleter, method setNextRunTime.
/**
 * Records the time the next discovery run is scheduled for on the discovered
 * system object identified by this completer.
 *
 * @param dbClient the database client used to persist the change
 * @param time the next discovery run time (epoch millis)
 * @throws RuntimeException if the completer's type is not a
 *             DiscoveredSystemObject subclass
 * @throws DatabaseException if the tracking instance cannot be created
 */
@Override
public final void setNextRunTime(DbClient dbClient, long time) {
    Class type = getType();
    if (!DiscoveredSystemObject.class.isAssignableFrom(type)) {
        throw new RuntimeException("Unsupported system Type : " + type.toString());
    }
    try {
        // createInstance builds a change-tracking stub so only the run-time
        // field is written back.
        DiscoveredSystemObject dbObject = (DiscoveredSystemObject) DataObject.createInstance(type, getId());
        dbObject.trackChanges();
        dbObject.setNextDiscoveryRunTime(time);
        dbClient.persistObject(dbObject);
    } catch (InstantiationException | IllegalAccessException ex) {
        // BUG FIX: the original built this exception but never threw it,
        // silently swallowing reflection failures. Propagate it instead.
        throw DatabaseException.fatals.queryFailed(ex);
    }
}
Aggregations