Use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportCreateCompleter in project coprhd-controller by CoprHD.
The class VPlexDeviceController, method exportGroupCreate.
/**
 * Orchestrates creation of a VPLEX export group: filters the supplied initiators
 * down to those connected to the VPLEX, determines whether the export spans one
 * or both VPLEX clusters (source varray and at most one "HA" varray), builds a
 * workflow with an export-mask step per side, and executes it.
 *
 * @param vplex URI of the VPLEX StorageSystem performing the export
 * @param export URI of the ExportGroup being created
 * @param initiators list of initiator URIs requested for the export (filtered below)
 * @param volumeMap map of volume URI to requested HLU
 * @param opId the workflow step / task operation id
 * @throws ControllerException on orchestration failure
 *
 * (non-Javadoc)
 *
 * @see com.emc.storageos.volumecontroller.impl.vplex.VplexController#exportGroupCreate(java.net.URI, java.net.URI,
 * java.util.Map,
 * java.util.List, java.lang.String)
 */
@Override
public void exportGroupCreate(URI vplex, URI export, List<URI> initiators, Map<URI, Integer> volumeMap, String opId) throws ControllerException {
ExportCreateCompleter completer = null;
try {
// Mark the workflow step executing before any real work begins.
WorkflowStepCompleter.stepExecuting(opId);
completer = new ExportCreateCompleter(export, volumeMap, opId);
// Load the ExportGroup and VPLEX system records referenced by the URIs.
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, export);
StorageSystem vplexSystem = _dbClient.queryObject(StorageSystem.class, vplex);
_log.info(String.format("VPLEX exportGroupCreate %s vplex %s", exportGroup.getLabel(), vplexSystem.getNativeGuid()));
URI srcVarray = exportGroup.getVirtualArray();
// RecoverPoint exports are single-sided: the HA-varray logic below is skipped for them.
boolean isRecoverPointExport = ExportUtils.checkIfInitiatorsForRP(_dbClient, exportGroup.getInitiators());
// Drop any initiators that have no connectivity to this VPLEX.
initiators = VPlexUtil.filterInitiatorsForVplex(_dbClient, initiators);
if (initiators == null || initiators.isEmpty()) {
// Nothing for the VPLEX to do; complete the task successfully.
_log.info("ExportGroup created with no initiators connected to VPLEX supplied, no need to orchestrate VPLEX further.");
completer.ready(_dbClient);
return;
}
// Determine whether this export will be done across both VPLEX clusters, or just one.
// If both, we will set up some data structures to handle both exports.
// Distributed volumes can be exported to both clusters.
// Local volumes may be from either the src or HA varray, and will be exported
// to the appropriate hosts.
// Hosts may have connectivity to one varray (local or HA), or both.
// Only one HA varray is allowed (technically one Varray not matching the ExportGroup).
// It is persisted in the exportGroup.altVirtualArray map with the Vplex System ID as key.
// Get a mapping of Virtual Array to the Volumes visible in each Virtual Array.
Map<URI, Set<URI>> varrayToVolumes = VPlexUtil.mapBlockObjectsToVarrays(_dbClient, volumeMap.keySet(), vplex, exportGroup);
// Extract the src volumes into their own set.
Set<URI> srcVolumes = varrayToVolumes.get(srcVarray);
// Remove the srcVolumes from the varraysToVolumesMap; whatever remains is HA-side.
varrayToVolumes.remove(srcVarray);
URI haVarray = null;
// Only pick an HA varray if there are non-source volumes
// and we're not exporting to recover point Initiators.
if (!varrayToVolumes.isEmpty() && !isRecoverPointExport) {
// Make sure there is only one "HA" varray and return the HA virtual array.
haVarray = VPlexUtil.pickHAVarray(varrayToVolumes);
}
// Partition the initiators by Varray.
// Some initiators may be connected to both virtual arrays, others connected to
// only one of the virtual arrays or the other.
List<URI> varrayURIs = new ArrayList<URI>();
varrayURIs.add(srcVarray);
if (haVarray != null) {
varrayURIs.add(haVarray);
}
Map<URI, List<URI>> varrayToInitiators = VPlexUtil.partitionInitiatorsByVarray(_dbClient, initiators, varrayURIs, vplexSystem);
if (varrayToInitiators.isEmpty()) {
// No initiator reaches either varray; fail fast with a descriptive exception.
throw VPlexApiException.exceptions.exportCreateNoinitiatorsHaveCorrectConnectivity(initiators.toString(), varrayURIs.toString());
}
// Validate that the export can be successful:
// If both src and ha volumes are present, all hosts must be connected.
// Otherwise, at least one host must be connected.
URI source = (srcVolumes == null || srcVolumes.isEmpty()) ? null : srcVarray;
VPlexUtil.validateVPlexClusterExport(_dbClient, source, haVarray, initiators, varrayToInitiators);
Workflow workflow = _workflowService.getNewWorkflow(this, "exportGroupCreate", true, opId);
// Persist the HA varray on the ExportGroup (keyed by VPLEX system) when there are
// HA side initiators, and volumes accessible from the HA side.
if (haVarray != null && varrayToInitiators.get(haVarray) != null) {
exportGroup.putAltVirtualArray(vplex.toString(), haVarray.toString());
_dbClient.updateObject(exportGroup);
}
// Reserve free HLUs for the cluster export before building the mask steps.
findAndUpdateFreeHLUsForClusterExport(vplexSystem, exportGroup, initiators, volumeMap);
// Do the source side export if there are src side volumes and initiators.
if (srcVolumes != null && varrayToInitiators.get(srcVarray) != null) {
assembleExportMasksWorkflow(vplex, export, srcVarray, varrayToInitiators.get(srcVarray), ExportMaskUtils.filterVolumeMap(volumeMap, srcVolumes), true, workflow, null, opId);
}
// Do the HA side export if there are
// HA side initiators, and volumes accessible from the HA side.
if (haVarray != null && varrayToInitiators.get(haVarray) != null) {
assembleExportMasksWorkflow(vplex, export, haVarray, varrayToInitiators.get(haVarray), ExportMaskUtils.filterVolumeMap(volumeMap, varrayToVolumes.get(haVarray)), true, workflow, null, opId);
}
// Initiate the workflow.
StringBuilder buf = new StringBuilder();
buf.append(String.format("VPLEX create ExportGroup %s for initiators %s completed successfully", export, initiators.toString()));
workflow.executePlan(completer, buf.toString());
_log.info("VPLEX exportGroupCreate workflow scheduled");
} catch (VPlexApiException vae) {
// NOTE(review): completer may still be null here if stepExecuting or the
// completer constructor threw — confirm failStep tolerates a null completer.
_log.error("Exception creating Export Group: " + vae.getMessage(), vae);
failStep(completer, opId, vae);
} catch (Exception ex) {
// Wrap unexpected failures in a ServiceError so the task is failed with context.
_log.error("Exception creating Export Group: " + ex.getMessage(), ex);
String opName = ResourceOperationTypeEnum.CREATE_EXPORT_GROUP.getName();
ServiceError serviceError = VPlexApiException.errors.exportGroupCreateFailed(opName, ex);
failStep(completer, opId, serviceError);
}
}
Use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportCreateCompleter in project coprhd-controller by CoprHD.
The class BlockDeviceExportController, method exportGroupCreate.
/**
 * Export one or more volumes. The volumeToExports parameter has
 * all the information required to do the add volumes operation.
 * Volumes are partitioned by backing storage system; a workflow step is
 * generated per storage system, with protection-related steps (if any)
 * inserted first by the ProtectionExportController.
 *
 * @param export URI of the ExportGroup to create (queried as ExportGroup below, not an ExportMask)
 * @param volumeMap Volume-lun map to be part of the export mask
 * @param initiatorURIs List of URIs for the initiators to be added to the export mask
 * @param opId Operation ID
 * @throws com.emc.storageos.volumecontroller.ControllerException
 */
@Override
public void exportGroupCreate(URI export, Map<URI, Integer> volumeMap, List<URI> initiatorURIs, String opId) throws ControllerException {
ExportTaskCompleter taskCompleter = new ExportCreateCompleter(export, volumeMap, opId);
Workflow workflow = null;
try {
// Do some initial sanitizing of the export parameters
StringSetUtil.removeDuplicates(initiatorURIs);
workflow = _wfUtils.newWorkflow("exportGroupCreate", false, opId);
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, export);
// Group the requested volumes by their backing storage system.
Map<URI, Map<URI, Integer>> storageToVolumes = getStorageToVolumeMap(volumeMap);
for (Map.Entry<URI, Map<URI, Integer>> entry : storageToVolumes.entrySet()) {
// Acquire host/storage locks for this storage system before adding its steps.
List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient, ExportGroup.ExportGroupType.valueOf(exportGroup.getType()), initiatorURIs, entry.getKey());
boolean acquiredLocks = _wfUtils.getWorkflowService().acquireWorkflowLocks(workflow, lockKeys, LockTimeoutValue.get(LockType.EXPORT_GROUP_OPS));
if (!acquiredLocks) {
throw DeviceControllerException.exceptions.failedToAcquireLock(lockKeys.toString(), "ExportGroupCreate: " + exportGroup.getLabel());
}
// Initialize the Map of objects to export with all objects.
Map<URI, Integer> objectsToAdd = new HashMap<URI, Integer>(entry.getValue());
String waitFor = null;
// Let the protection controller add its steps first; judging by the check
// below, it appears to remove the objects it handles from objectsToAdd —
// TODO(review): confirm against ProtectionExportController implementation.
ProtectionExportController protectionController = getProtectionExportController();
waitFor = protectionController.addStepsForExportGroupCreate(workflow, null, waitFor, export, objectsToAdd, entry.getKey(), initiatorURIs);
if (!objectsToAdd.isEmpty()) {
// There are no export BlockObjects tied to the current storage system that have an associated protection
// system. We can just create a step to call the block controller directly for export group create.
_log.info(String.format("Generating exportGroupCreates steps for objects %s associated with storage system [%s]", objectsToAdd, entry.getKey()));
_wfUtils.generateExportGroupCreateWorkflow(workflow, null, waitFor, entry.getKey(), export, objectsToAdd, initiatorURIs);
}
}
// Execute all generated steps; the completer fires when the workflow finishes.
workflow.executePlan(taskCompleter, "Exported to all devices successfully.");
} catch (Exception ex) {
String message = "exportGroupCreate caught an exception.";
_log.error(message, ex);
// NOTE(review): workflow may still be null here if newWorkflow threw —
// confirm releaseAllWorkflowLocks tolerates a null workflow argument.
_wfUtils.getWorkflowService().releaseAllWorkflowLocks(workflow);
ServiceError serviceError = DeviceControllerException.errors.jobFailed(ex);
taskCompleter.error(_dbClient, serviceError);
}
}
Aggregations