Use of com.emc.storageos.exceptions.DeviceControllerException in project coprhd-controller by CoprHD.
The class ExportWorkflowEntryPoints, method exportGroupChangePathParams.
/**
* Changes the PathParams (e.g. maxPaths, pathsPerInitiator) for a volume in all the
* ExportMasks containing that volume in an Export Group.
*
* @param storageURI
* -- URI of storage system containing the volume.
* @param exportGroupURI
* -- URI of Export Group to be processed.
* @param volumeURI
* -- URI of the volume whose VPool change supplies the new PathParams
* @param token
* -- String for completers.
* @throws ControllerException
*/
public void exportGroupChangePathParams(URI storageURI, URI exportGroupURI, URI volumeURI, String token) throws ControllerException {
    try {
        WorkflowStepCompleter.stepExecuting(token);
        final String workflowKey = "exportChangeParams";
        if (!WorkflowService.getInstance().hasWorkflowBeenCreated(token, workflowKey)) {
            DiscoveredSystemObject storage = ExportWorkflowUtils.getStorageSystem(_dbClient, storageURI);
            MaskingOrchestrator orchestrator = getOrchestrator(storage.getSystemType());
            orchestrator.exportGroupChangePathParams(storageURI, exportGroupURI, volumeURI, token);
            // Mark this workflow as created/executed so we don't do it again on retry/resume
            WorkflowService.getInstance().markWorkflowBeenCreated(token, workflowKey);
        }
    } catch (Exception e) {
        DeviceControllerException exception = DeviceControllerException.exceptions.exportGroupChangePathParams(e);
        WorkflowStepCompleter.stepFailed(token, exception);
    }
}
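Both this method and exportChangePortGroup below guard the call into the masking orchestrator with hasWorkflowBeenCreated/markWorkflowBeenCreated, so a retried or resumed step does not spawn a second inner workflow. The following is a minimal, self-contained sketch of that guard pattern; WorkflowGuard, the token value, and the key are hypothetical stand-ins, not CoprHD classes.

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical illustration of the "create the inner workflow only once per step token" guard.
public class WorkflowGuard {

    // Tracks which (token, workflowKey) pairs have already spawned a workflow.
    private final Set<String> created = ConcurrentHashMap.newKeySet();

    public boolean hasWorkflowBeenCreated(String token, String workflowKey) {
        return created.contains(token + ":" + workflowKey);
    }

    public void markWorkflowBeenCreated(String token, String workflowKey) {
        created.add(token + ":" + workflowKey);
    }

    public static void main(String[] args) {
        WorkflowGuard guard = new WorkflowGuard();
        String token = "task-123";          // step token, hypothetical value
        String key = "exportChangeParams";  // same key string used by the entry point above
        for (int attempt = 1; attempt <= 2; attempt++) {
            if (!guard.hasWorkflowBeenCreated(token, key)) {
                System.out.println("attempt " + attempt + ": creating inner workflow");
                guard.markWorkflowBeenCreated(token, key);
            } else {
                System.out.println("attempt " + attempt + ": workflow already created, skipping");
            }
        }
    }
}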
Use of com.emc.storageos.exceptions.DeviceControllerException in project coprhd-controller by CoprHD.
The class ExportWorkflowEntryPoints, method exportChangePortGroup.
public void exportChangePortGroup(URI storageURI, URI exportGroupURI, URI portGroupURI, List<URI> exportMaskURIs, boolean waitForApproval, String token) {
    try {
        WorkflowStepCompleter.stepExecuting(token);
        final String workflowKey = "exportChangePortGroup";
        if (!WorkflowService.getInstance().hasWorkflowBeenCreated(token, workflowKey)) {
            DiscoveredSystemObject storage = ExportWorkflowUtils.getStorageSystem(_dbClient, storageURI);
            MaskingOrchestrator orchestrator = getOrchestrator(storage.getSystemType());
            orchestrator.changePortGroup(storageURI, exportGroupURI, portGroupURI, exportMaskURIs, waitForApproval, token);
            // Mark this workflow as created/executed so we don't do it again on retry/resume
            WorkflowService.getInstance().markWorkflowBeenCreated(token, workflowKey);
        } else {
            _log.info("Workflow for exportChangePortGroup is already created");
        }
    } catch (Exception e) {
        DeviceControllerException exception = DeviceControllerException.exceptions.exportGroupChangePortGroupError(e);
        WorkflowStepCompleter.stepFailed(token, exception);
    }
}
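Both entry points resolve the device-specific orchestrator with getOrchestrator(storage.getSystemType()). One plausible way to back such a lookup is a registry keyed by system type; the sketch below is a hypothetical simplification (OrchestratorRegistry and its trimmed-down MaskingOrchestrator interface are invented for illustration), not the actual ExportWorkflowEntryPoints implementation.

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of dispatching to a device-specific orchestrator by system type.
public class OrchestratorRegistry {

    // Minimal stand-in for the MaskingOrchestrator interface.
    interface MaskingOrchestrator {
        void exportGroupChangePathParams(String storageId, String exportGroupId, String volumeId, String token);
    }

    private final Map<String, MaskingOrchestrator> orchestrators = new HashMap<>();

    public void register(String systemType, MaskingOrchestrator orchestrator) {
        orchestrators.put(systemType, orchestrator);
    }

    public MaskingOrchestrator getOrchestrator(String systemType) {
        MaskingOrchestrator o = orchestrators.get(systemType);
        if (o == null) {
            throw new IllegalArgumentException("No orchestrator registered for system type: " + systemType);
        }
        return o;
    }

    public static void main(String[] args) {
        OrchestratorRegistry registry = new OrchestratorRegistry();
        // Register an illustrative orchestrator for the "hds" system type.
        registry.register("hds", (s, eg, v, t) ->
                System.out.println("HDS path-param change for volume " + v));
        registry.getOrchestrator("hds")
                .exportGroupChangePathParams("storage-1", "eg-1", "vol-1", "token-1");
    }
}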
Use of com.emc.storageos.exceptions.DeviceControllerException in project coprhd-controller by CoprHD.
The class HDSMaskingOrchestrator, method exportGroupCreate.
/**
* Create storage level masking components to support the requested
* ExportGroup object. This operation will be flexible enough to take into
* account initiators that already exist in some
* StorageGroup. In such a case, the underlying masking component will be
* "adopted" by the ExportGroup. Further operations against the "adopted"
* mask will only allow for addition and removal of those initiators/volumes
* that were added by a Bourne request. Existing initiators/volumes will be
* maintained.
*
* @param storageURI
* - URI referencing underlying storage array
* @param exportGroupURI
* - URI referencing Bourne-level masking, ExportGroup
* @param initiatorURIs
* - List of Initiator URIs
* @param volumeMap
* - Map of Volume URIs to requested Integer HLU
* @param token
* - Identifier for operation
* @throws Exception
*/
@Override
public void exportGroupCreate(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, Map<URI, Integer> volumeMap, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = null;
    try {
        BlockStorageDevice device = getDevice();
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        if (initiatorURIs != null && !initiatorURIs.isEmpty()) {
            _log.info("export_create: initiator list non-empty");
            // Set up workflow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(),
                    "exportGroupCreate", true, token);
            // Create two steps, one for Zoning, one for the ExportGroup actions.
            // This step is for zoning. It is not specific to a single
            // NetworkSystem, as it will look at all the initiators and targets and compute
            // the zones required (which might be on multiple NetworkSystems.)
            boolean createdSteps = determineExportGroupCreateSteps(workflow, null, device, storage, exportGroup,
                    initiatorURIs, volumeMap, false, token);
            String zoningStep = generateDeviceSpecificZoningCreateWorkflow(workflow, EXPORT_GROUP_MASKING_TASK,
                    exportGroup, null, volumeMap);
            if (createdSteps && null != zoningStep) {
                // Execute the plan and allow the WorkflowExecutor to fire the
                // taskCompleter.
                String successMessage = String.format("ExportGroup successfully applied for StorageArray %s",
                        storage.getLabel());
                workflow.executePlan(taskCompleter, successMessage);
            }
        } else {
            _log.info("export_create: initiator list is empty");
            taskCompleter.ready(_dbClient);
        }
    } catch (DeviceControllerException dex) {
        if (taskCompleter != null) {
            taskCompleter.error(_dbClient, DeviceControllerException.errors.vmaxExportGroupCreateError(dex.getMessage()));
        }
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        // TODO add service code here
        if (taskCompleter != null) {
            ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex);
            taskCompleter.error(_dbClient, serviceError);
        }
    }
}
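Note how exportGroupCreate never lets a failure escape: a DeviceControllerException is mapped to a device-specific error and any other exception to a generic job-failed ServiceError, both reported through the ExportOrchestrationTask completer, while the success path either executes the workflow plan or marks the completer ready when there is nothing to do. The sketch below illustrates that completer-based error handling in isolation; its types are simplified stand-ins, not the CoprHD ExportOrchestrationTask or ServiceError APIs.

// Hypothetical sketch of the "report errors through the task completer" pattern
// used by exportGroupCreate above; the types here are simplified stand-ins.
public class CompleterErrorHandlingSketch {

    // Stand-in for the device-specific exception type.
    static class DeviceControllerException extends RuntimeException {
        DeviceControllerException(String msg) { super(msg); }
    }

    // Stand-in for the task completer that records the task outcome.
    interface TaskCompleter {
        void ready();
        void error(String message);
    }

    static void exportGroupCreate(Runnable orchestration, TaskCompleter completer) {
        try {
            orchestration.run();
            completer.ready();
        } catch (DeviceControllerException dex) {
            // Device-specific failure: surface the device error message.
            completer.error("export group create failed on device: " + dex.getMessage());
        } catch (Exception ex) {
            // Anything else becomes a generic job-failed error.
            completer.error("export group create failed: " + ex.getMessage());
        }
    }

    public static void main(String[] args) {
        TaskCompleter completer = new TaskCompleter() {
            public void ready() { System.out.println("task ready"); }
            public void error(String message) { System.out.println("task error: " + message); }
        };
        exportGroupCreate(() -> { throw new DeviceControllerException("masking view already exists"); }, completer);
    }
}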
Use of com.emc.storageos.exceptions.DeviceControllerException in project coprhd-controller by CoprHD.
The class DataCollectionJobConsumer, method invokeJob.
public void invokeJob(final DataCollectionJob job) throws Exception {
    if (job instanceof DataCollectionScanJob) {
        throw new DeviceControllerException("Invoked wrong job type : " + job.getType());
    }
    DataCollectionTaskCompleter completer = job.getCompleter();
    // set the next run time based on the time this discovery job is started (not the time it's queued)
    completer.setNextRunTime(_dbClient,
            System.currentTimeMillis() + JobIntervals.get(job.getType()).getInterval() * 1000);
    completer.updateObjectState(_dbClient, DiscoveredDataObject.DataCollectionJobStatus.IN_PROGRESS);
    // get the node that this discovery is being run on so it is displayed in the UI
    String jobType = job.getType();
    String nodeId = _coordinator.getInetAddessLookupMap().getNodeId();
    job.updateTask(_dbClient, "Started " + jobType + " on node " + nodeId);
    /**
     * TODO ISILON or VNXFILE
     * AccessProfile needs to get created, for each device Type.
     * Hence for isilon or vnxFile discovery, add logic in getAccessProfile
     * to set the required parameters for Discovery.
     */
    AccessProfile profile = _util.getAccessProfile(completer.getType(), completer.getId(), jobType, job.getNamespace());
    profile.setProps(new HashMap<String, String>(_configInfo));
    if (job instanceof DataCollectionArrayAffinityJob) {
        List<URI> hostIds = ((DataCollectionArrayAffinityJob) job).getHostIds();
        if (hostIds != null && !hostIds.isEmpty()) {
            profile.getProps().put(Constants.HOST_IDS, StringUtils.join(hostIds, Constants.ID_DELIMITER));
        }
        List<URI> systemIds = ((DataCollectionArrayAffinityJob) job).getSystemIds();
        if (systemIds != null && !systemIds.isEmpty()) {
            profile.getProps().put(Constants.SYSTEM_IDS, StringUtils.join(systemIds, Constants.ID_DELIMITER));
            Iterator<StorageSystem> storageSystems = _dbClient.queryIterativeObjects(StorageSystem.class, systemIds);
            List<String> systemSerialIds = new ArrayList<String>();
            while (storageSystems.hasNext()) {
                StorageSystem systemObj = storageSystems.next();
                systemSerialIds.add(systemObj.getSerialNumber());
            }
            if (!systemSerialIds.isEmpty()) {
                profile.getProps().put(Constants.SYSTEM_SERIAL_IDS, StringUtils.join(systemSerialIds, Constants.ID_DELIMITER));
            }
        }
    }
    profile.setCimConnectionFactory(_connectionFactory);
    profile.setCurrentSampleTime(System.currentTimeMillis());
    DataCollectionJobInvoker invoker = new DataCollectionJobInvoker(profile, _configInfo, _dbClient, _coordinator,
            _networkDeviceController, _locker, job.getNamespace(), completer);
    invoker.process(applicationContext);
    job.ready(_dbClient);
}
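invokeJob computes the next run time from the moment the job starts (interval in seconds, converted to milliseconds) and packs host/system identifiers into the access profile as delimiter-joined strings. The standalone sketch below reproduces those two small calculations; the interval value and the delimiter are illustrative assumptions, not the values CoprHD's JobIntervals or Constants actually define.

import java.util.Arrays;
import java.util.List;

// Hypothetical sketch of two details from invokeJob: scheduling the next run
// relative to the start time, and joining IDs into a single delimited property.
public class DiscoveryJobSketch {

    public static void main(String[] args) {
        // Next run time = start of this run + interval (interval assumed to be in seconds).
        long intervalSeconds = 3600L;                       // illustrative value only
        long nextRunTime = System.currentTimeMillis() + intervalSeconds * 1000;
        System.out.println("next run at epoch ms: " + nextRunTime);

        // Join system serial numbers with a delimiter before storing them as one property.
        List<String> systemSerialIds = Arrays.asList("SN0001", "SN0002", "SN0003");
        String delimiter = ",";                             // stand-in for Constants.ID_DELIMITER
        String joined = String.join(delimiter, systemSerialIds);
        System.out.println("SYSTEM_SERIAL_IDS=" + joined);
    }
}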
Use of com.emc.storageos.exceptions.DeviceControllerException in project coprhd-controller by CoprHD.
The class IsilonFileStorageDevice, method doCheckFSDependencies.
@Override
public BiosCommandResult doCheckFSDependencies(StorageSystem storage, FileDeviceInputOutput args) {
    _log.info("Checking file system {} has dependencies in storage array: {}", args.getFsName(), storage.getLabel());
    boolean hasDependency = true;
    String vnasName = null;
    VirtualNAS vNas = args.getvNAS();
    if (vNas != null) {
        vnasName = vNas.getNasName();
    }
    try {
        String fsMountPath = args.getFsMountPath();
        hasDependency = doesNFSExportExistsForFSPath(storage, vnasName, fsMountPath);
        if (!hasDependency) {
            hasDependency = doesCIFSShareExistsForFSPath(storage, vnasName, fsMountPath);
        }
        if (!hasDependency) {
            hasDependency = doesSnapshotExistsForFSPath(storage, vnasName, fsMountPath);
        }
        if (hasDependency) {
            _log.error("File system has dependencies on array: {}", args.getFsName());
            DeviceControllerException e = DeviceControllerException.exceptions.fileSystemHasDependencies(fsMountPath);
            return BiosCommandResult.createErrorResult(e);
        }
        _log.info("File system has no dependencies on array: {}", args.getFsName());
        return BiosCommandResult.createSuccessfulResult();
    } catch (IsilonException e) {
        _log.error("Checking FS dependencies failed.", e);
        throw e;
    }
}
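doCheckFSDependencies stops at the first dependency it finds: NFS exports are checked first, then CIFS shares, then snapshots, and only if all three come back false is the file system reported as dependency-free. The sketch below expresses that short-circuit chain with BooleanSupplier stand-ins; the suppliers and the mount path are illustrative, not the Isilon driver's real helpers.

import java.util.Arrays;
import java.util.List;
import java.util.function.BooleanSupplier;

// Hypothetical sketch of the short-circuit dependency check used by doCheckFSDependencies:
// evaluate each check in order and stop as soon as one reports a dependency.
public class FsDependencySketch {

    static boolean hasDependency(List<BooleanSupplier> checks) {
        for (BooleanSupplier check : checks) {
            if (check.getAsBoolean()) {
                return true;   // later checks are skipped, as in the driver code above
            }
        }
        return false;
    }

    public static void main(String[] args) {
        String fsMountPath = "/ifs/vipr/fs01";  // illustrative path
        boolean dependent = hasDependency(Arrays.<BooleanSupplier>asList(
                () -> false,   // stand-in for doesNFSExportExistsForFSPath(...)
                () -> true,    // stand-in for doesCIFSShareExistsForFSPath(...)
                () -> false)); // stand-in for doesSnapshotExistsForFSPath(...)
        System.out.println(fsMountPath + (dependent ? " has dependencies" : " has no dependencies"));
    }
}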