use of com.emc.storageos.db.client.model.StringMap in project coprhd-controller by CoprHD.
Usage in class ExportService, method processReserveRequest.
/**
 * Marks the given volume as reserved for an upcoming attach operation by
 * writing the lowercase ATTACHING state into its extensions "status" entry,
 * then persists the volume.
 *
 * @param vol volume to reserve; its extensions map is created if absent
 * @param openstackTenantId OpenStack tenant id (not used by this method)
 */
private void processReserveRequest(Volume vol, String openstackTenantId) {
    StringMap volumeExtensions = vol.getExtensions();
    if (null == volumeExtensions) {
        volumeExtensions = new StringMap();
    }
    volumeExtensions.put("status", ComponentStatus.ATTACHING.getStatus().toLowerCase());
    vol.setExtensions(volumeExtensions);
    _dbClient.updateObject(vol);
}
use of com.emc.storageos.db.client.model.StringMap in project coprhd-controller by CoprHD.
Usage in class ExportService, method processAttachToInstance.
/**
 * Records nova-instance attachment details (instance uuid, mountpoint, attach
 * mode) on the volume's extensions map, transitions its "status" entry to the
 * lowercase IN_USE state, and persists the volume.
 *
 * @param vol volume being attached; its extensions map is created if absent
 * @param attachToInst attach request carrying instance_uuid, mountpoint and mode
 * @param openstackTenantId OpenStack tenant id (not used by this method)
 */
private void processAttachToInstance(Volume vol, VolumeActionRequest.AttachToInstance attachToInst, String openstackTenantId) {
    _log.info("Attach to the nova instance request");
    // Step 1: get list of host initiators to be added
    _log.info("THE ATTACH.INSTANCE IS {}", attachToInst.instance_uuid.toString());
    _log.info("ID IS {}", vol.getId().toString());
    _log.info("extensions IS {}", vol.getExtensions());
    StringMap attachExtensions = vol.getExtensions();
    if (null == attachExtensions) {
        attachExtensions = new StringMap();
        vol.setExtensions(attachExtensions);
    }
    attachExtensions.put("OPENSTACK_NOVA_INSTANCE_ID", attachToInst.instance_uuid.toString());
    attachExtensions.put("OPENSTACK_NOVA_INSTANCE_MOUNTPOINT", attachToInst.mountpoint.toString());
    attachExtensions.put("OPENSTACK_ATTACH_MODE", attachToInst.mode);
    attachExtensions.put("status", ComponentStatus.IN_USE.getStatus().toLowerCase());
    _dbClient.updateObject(vol);
}
use of com.emc.storageos.db.client.model.StringMap in project coprhd-controller by CoprHD.
Usage in class ExportService, method setReadOnlyFlag.
/**
 * Sets the "readonly" flag ("true"/"false") in the volume's extensions map
 * based on the requested read-only value, and persists the volume.
 *
 * @param vol volume to flag; its extensions map is created if absent
 * @param readonlyVolume request carrying the desired read-only value as a string
 * @param openstackTenantId OpenStack tenant id (not used by this method)
 */
private void setReadOnlyFlag(Volume vol, VolumeActionRequest.ReadOnlyVolume readonlyVolume, String openstackTenantId) {
    StringMap extensions = vol.getExtensions();
    if (extensions == null) {
        extensions = new StringMap();
    }
    // Exact (trimmed) comparison; the previous contains("true") check would
    // also have treated values like "untrue" as read-only.
    if ("true".equals(readonlyVolume.readonly.trim())) {
        extensions.put("readonly", "true");
    } else {
        extensions.put("readonly", "false");
    }
    vol.setExtensions(extensions);
    _dbClient.updateObject(vol);
}
use of com.emc.storageos.db.client.model.StringMap in project coprhd-controller by CoprHD.
Usage in class ExportService, method actionOnVolume.
/**
 * Action could be either export or unexport volume
 *
 * NOTE: This is an asynchronous operation.
 *
 * @prereq none
 *
 * @param param POST data containing the volume action information.
 *            The different kinds of operations that are part of the export are
 *            Reserve, unreserve, terminate, begin detach, detach, attach, init connection,
 *            extend, set bootable, set Readonly
 *
 *            os-reserve: reserve a volume for initiating the attach operation.
 *            os-unreserve: unreserve the volume to indicate the attach operation being performed is over.
 *            os-begin_detaching: Initiate the detach operation by setting the status to detaching.
 *            os-detach: Set the detach related status in the db.
 *            os-terminate_connection: detach in the backend.
 *            os-initialize_connection: create export of the volume to the nova node.
 *            os-attach: perform the mount of the volume that has been exported to the nova instance.
 *            os-extend: extend size of volume.
 *            os-reset_status: reset the status of the volume.
 *            os-set_bootable: set bootable flag on volume.
 *            os-update_readonly_flag: update the volume as readonly.
 *
 * @brief Export/Unexport volume
 * @return A reference to a BlockTaskList containing a list of
 *         TaskResourceRep references specifying the task data for the
 *         volume creation tasks.
 * @throws InternalException
 * @throws InterruptedException
 */
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/{volume_id}/action")
@CheckPermission(roles = { Role.SYSTEM_MONITOR, Role.TENANT_ADMIN }, acls = { ACL.ANY })
public Object actionOnVolume(@PathParam("tenant_id") String openstackTenantId, @PathParam("volume_id") String volumeId, String input) throws InternalException, InterruptedException {
    // Step 1: Parameter validation
    // Eventually we should use the project id that comes from the API
    _log.info("String format input is = {}", input);
    _log.info("Action on volume: id = {}", volumeId);

    // Flags recording which Cinder export operation the raw JSON payload requests.
    // The raw string is probed with contains() before JSON parsing below.
    boolean bReserve = false;
    boolean bUnReserve = false;
    boolean bTerminate = false;
    boolean bBeginDetach = false;
    boolean bDetach = false;
    boolean bAttach = false;
    boolean bInitCon = false;
    boolean bExtend = false;
    boolean bBootable = false;
    boolean bReadonly = false;
    if (input.contains(ExportOperations.OS_RESERVE.getOperation())) {
        bReserve = true;
    }
    if (input.contains(ExportOperations.OS_UNRESERVE.getOperation())) {
        bUnReserve = true;
    }
    if (input.contains(ExportOperations.OS_TERMINATE_CONNECTION.getOperation())) {
        bTerminate = true;
    }
    if (input.contains(ExportOperations.OS_BEGIN_DETACHING.getOperation())) {
        bBeginDetach = true;
    }
    if (input.contains(ExportOperations.OS_DETACH.getOperation())) {
        bDetach = true;
    }
    if (input.contains(ExportOperations.OS_ATTACH.getOperation())) {
        bAttach = true;
    }
    if (input.contains(ExportOperations.OS_INITIALIZE_CONNECTION.getOperation())) {
        bInitCon = true;
    }
    if (input.contains(ExportOperations.OS_EXTEND.getOperation())) {
        // for expand volume
        // expand size has to be numeric and null size can't be accepted
        // for expand volume verify passed extendsize is in numeric format
        // otherwise it will result in Bad Request for improper input
        String[] extendStrings = input.split(":");
        // Guard against malformed payloads that previously triggered an
        // ArrayIndexOutOfBoundsException before the size could be validated.
        if (extendStrings.length < 3) {
            _log.info("Improper extend request = {}", input);
            return CinderApiUtils.createErrorResponse(400, "Bad request : improper volume extend size ");
        }
        String sizeString = extendStrings[2].replaceAll("}", "");
        _log.debug("extend string size value = {}", sizeString);
        // replaceAll() never returns null, so only the numeric check is needed.
        if (!isNumeric(sizeString.trim())) {
            _log.info("Improper extend size ={}", sizeString.trim());
            return CinderApiUtils.createErrorResponse(400, "Bad request : improper volume extend size ");
        }
        bExtend = true;
    }
    if (input.contains(ExportOperations.OS_SET_BOOTABLE.getOperation())) {
        bBootable = true;
    }
    if (input.contains(ExportOperations.OS_UPDATE_READONLY.getOperation())) {
        bReadonly = true;
    }
    if (input.contains(ExportOperations.OS_RESET_STATUS.getOperation())) {
        // Reset-status is handled immediately, before the JSON is parsed.
        Volume vol = findVolume(volumeId, openstackTenantId);
        if (vol != null) {
            return changeVolumeStatus(vol, input);
        } else {
            return Response.status(404).build();
        }
    }
    _log.info(String.format("bReserve: %b , bUnReserve: %b, bTerminate:%b, bBeginDetach:%b , bDetach:%b , " + "bAttach:%b , bInitCon:%b , bExtend:%b, bReadonly:%b", bReserve, bUnReserve, bTerminate, bBeginDetach, bDetach, bAttach, bInitCon, bExtend, bReadonly));
    // TODO : handle xml format requests also and cater to the operations
    Gson gson = new Gson();
    VolumeActionRequest action = gson.fromJson(input, VolumeActionRequest.class);
    Volume vol = findVolume(volumeId, openstackTenantId);
    if (vol == null) {
        _log.info("Invalid volume id ={} ", volumeId);
        return CinderApiUtils.createErrorResponse(404, "Not Found : Invalid volume id");
    }
    // Step 2: Check if the user has rights for volume modification
    verifyUserCanModifyVolume(vol);
    _log.debug("User can modify volume");
    if ((bInitCon) && (action.attach.connector != null) && (action.attach.connector.ip != null) && (action.attach.connector.ip.length() > 0)) {
        // os-initialize_connection: export the volume to the nova node over
        // the protocol chosen from the connector information.
        String chosenProtocol = getProtocol(vol, action.attach.connector);
        boolean bIsSuccess = processAttachRequest(vol, action.attach, openstackTenantId, chosenProtocol);
        if (bIsSuccess) {
            if (chosenProtocol.equals("iSCSI")) {
                return populateIscsiConnectionInfo(vol);
            } else if (chosenProtocol.equals("FC")) {
                return populateFcConnectionInfo(chosenProtocol, vol, action, openstackTenantId);
            }
        } else {
            vol.getExtensions().put("status", "OPENSTACK_ATTACHING_TIMED_OUT");
            _dbClient.updateObject(vol);
            _log.info("After fifteen tries, the ITLs are not found yet and hence failing initilize connection");
        }
        // Reached on timeout or on an unrecognized protocol.
        throw APIException.internalServerErrors.genericApisvcError("Export failed", new Exception("Initialize_connection operation failed due to timeout"));
    } else if (bDetach) {
        // os-detach: simply mark the volume available again.
        getVolExtensions(vol).put("status", ComponentStatus.AVAILABLE.getStatus().toLowerCase());
        _dbClient.updateObject(vol);
        return Response.status(202).build();
    } else if (bBeginDetach) {
        // os-begin_detaching: only valid from the in-use state.
        if (getVolExtensions(vol).containsKey("status") && getVolExtensions(vol).get("status").equals(ComponentStatus.IN_USE.getStatus().toLowerCase())) {
            getVolExtensions(vol).put("status", ComponentStatus.DETACHING.getStatus().toLowerCase());
            _dbClient.updateObject(vol);
            return Response.status(202).build();
        } else {
            _log.info("Volume is already in detached state.");
            throw APIException.internalServerErrors.genericApisvcError("Unexport failed", new Exception("Volume not in attached state"));
        }
    } else if (bTerminate) {
        // os-terminate_connection: tear down the backend export, then clear
        // the nova attachment metadata.
        StringMap extensionsMap = getVolExtensions(vol);
        if (extensionsMap.containsKey("status") && (extensionsMap.get("status").equals(ComponentStatus.DETACHING.getStatus().toLowerCase()) || extensionsMap.get("status").equals(ComponentStatus.IN_USE.getStatus().toLowerCase()))) {
            extensionsMap.put("status", ComponentStatus.DETACHING.getStatus().toLowerCase());
            _dbClient.updateObject(vol);
            String chosenProtocol = getProtocol(vol, action.detach.connector);
            processDetachRequest(vol, action.detach, openstackTenantId, chosenProtocol);
            extensionsMap.put("status", ComponentStatus.AVAILABLE.getStatus().toLowerCase());
            extensionsMap.remove("OPENSTACK_NOVA_INSTANCE_ID");
            extensionsMap.remove("OPENSTACK_NOVA_INSTANCE_MOUNTPOINT");
            extensionsMap.remove("OPENSTACK_ATTACH_MODE");
            _dbClient.updateObject(vol);
            return Response.status(202).build();
        } else {
            _log.info("Volume not in a state for terminate connection.");
            throw APIException.internalServerErrors.genericApisvcError("Unexport failed", new Exception("Volume not in state for termination"));
        }
    } else if (bAttach) {
        // os-attach: record the nova instance details on the volume.
        _log.info("IN THE IF CONDITION OF THE ATTACH VOLUME TO INSTANCE");
        if ((action.attachToInstance != null) && (action.attachToInstance.instance_uuid != null) && (action.attachToInstance.instance_uuid.length() > 0)) {
            processAttachToInstance(vol, action.attachToInstance, openstackTenantId);
            return Response.status(202).build();
        } else {
            throw APIException.badRequests.parameterIsNullOrEmpty("Instance");
        }
    } else if (bReserve) {
        // os-reserve: reject a second reservation of an already-reserved volume.
        _log.info("IN THE IF CONDITION OF RESERVE");
        if (getVolExtensions(vol).containsKey("status") && getVolExtensions(vol).get("status").equals(ComponentStatus.ATTACHING.getStatus().toLowerCase())) {
            _log.debug("Reserved Volume cannot be reserved again");
            return CinderApiUtils.createErrorResponse(400, "Bad request : volume is already reserved");
        }
        processReserveRequest(vol, openstackTenantId);
        return Response.status(202).build();
    } else if (bUnReserve) {
        processUnReserveRequest(vol, openstackTenantId);
        return Response.status(202).build();
    } else if (bBootable) {
        _log.debug("set Volume bootable Flag");
        if (action.bootVol != null) {
            setBootable(vol, action.bootVol, openstackTenantId);
            return Response.status(200).build();
        } else {
            throw APIException.badRequests.parameterIsNullOrEmpty("Volume");
        }
    } else if (bReadonly) {
        _log.debug("Set Readonly Flag for Volume");
        if (action.readonlyVol != null) {
            setReadOnlyFlag(vol, action.readonlyVol, openstackTenantId);
            return Response.status(200).build();
        } else {
            throw APIException.badRequests.parameterIsNullOrEmpty("Volume");
        }
    } else if (bExtend) {
        // os-extend: new size (in GB from the request) must exceed the current capacity.
        _log.info("Extend existing volume size");
        if (action.extendVol != null) {
            if (volumeId == null) {
                _log.info("Source volume id is empty ");
                return CinderApiUtils.createErrorResponse(404, "Not Found : source volume id is empty");
            }
            long extend_size = action.extendVol.new_size * GB;
            if (extend_size <= vol.getCapacity()) {
                _log.info(String.format("expandVolume: VolumeId id: %s, Current size: %d, New size: %d ", volumeId, vol.getCapacity(), extend_size));
                return CinderApiUtils.createErrorResponse(400, "Bad request : New size should be larger than old");
            }
            extendExistingVolume(vol, extend_size, openstackTenantId, volumeId);
            return Response.status(202).build();
        } else {
            throw APIException.badRequests.parameterIsNullOrEmpty("Volume");
        }
    }
    // No recognized operation was found in the payload.
    throw APIException.badRequests.parameterIsNotValid("Action Type");
}
use of com.emc.storageos.db.client.model.StringMap in project coprhd-controller by CoprHD.
Usage in class BlockVplexVolumeIngestOrchestrator, method findOrCreateExportGroup.
/**
 * Find or Create an ExportGroup for the given unmanaged export mask.
 *
 * @param requestContext ingestion request context; supplies the VPLEX StorageSystem
 *            and any ExportGroups already created earlier in this request
 * @param array -- Array StorageSystem (back-end array; used to derive the group name)
 * @param initiators -- Collection<Initiator> representing VPLEX back-end ports.
 * @param virtualArrayURI virtual array the group must belong to
 * @param projectURI project the group must belong to
 * @param tenantURI tenant the group must belong to
 * @param numPaths Value of maxPaths to be put in ExportGroup
 * @param unmanagedExportMask the unmanaged export mask
 * @return existing or newly created ExportGroup (not yet persisted)
 */
private ExportGroup findOrCreateExportGroup(IngestionRequestContext requestContext, StorageSystem array, Collection<Initiator> initiators, URI virtualArrayURI, URI projectURI, URI tenantURI, int numPaths, UnManagedExportMask unmanagedExportMask) {
StorageSystem vplex = requestContext.getStorageSystem();
// Group name: <mask name>_<system type minus "block"><last 4 chars of array serial>.
String arrayName = array.getSystemType().replace("block", "") + array.getSerialNumber().substring(array.getSerialNumber().length() - 4);
String groupName = unmanagedExportMask.getMaskName() + "_" + arrayName;
// 1) Check groups already created earlier in this ingestion request.
ExportGroup exportGroup = requestContext.findExportGroup(groupName, projectURI, virtualArrayURI, null, null);
if (null != exportGroup) {
_logger.info(String.format("Returning existing ExportGroup %s", exportGroup.getLabel()));
return exportGroup;
}
// 2) Check persisted groups whose label exactly matches the generated name.
List<ExportGroup> exportGroups = CustomQueryUtility.queryActiveResourcesByConstraint(_dbClient, ExportGroup.class, PrefixConstraint.Factory.getFullMatchConstraint(ExportGroup.class, "label", groupName));
if (null != exportGroups && !exportGroups.isEmpty()) {
// NOTE(review): no break — if several groups share the label, the LAST
// non-null one wins; confirm this is intended rather than first-match.
for (ExportGroup group : exportGroups) {
if (null != group) {
_logger.info(String.format("Returning existing ExportGroup %s", group.getLabel()));
exportGroup = group;
}
}
} else {
// 3) No label match: look for an existing group reachable from the
// initiators that matches on initiators, varray, project, and tenant.
Map<String, ExportGroup> possibleExportGroups = new HashMap<String, ExportGroup>();
Set<String> initiatorUris = new HashSet<String>();
for (Initiator initiator : initiators) {
// Determine all the possible existing Export Groups
List<ExportGroup> groups = ExportUtils.getInitiatorExportGroups(initiator, _dbClient);
for (ExportGroup group : groups) {
if (!possibleExportGroups.containsKey(group.getId().toString())) {
possibleExportGroups.put(group.getId().toString(), group);
}
}
initiatorUris.add(initiator.getId().toString());
}
// If there are possible Export Groups, look for one with that matches on inits, varray, project, and tenant.
for (ExportGroup group : possibleExportGroups.values()) {
if (URIUtil.identical(group.getVirtualArray(), virtualArrayURI) && URIUtil.identical(group.getProject().getURI(), projectURI) && URIUtil.identical(group.getTenant().getURI(), tenantURI)) {
// The candidate must contain ALL of the requested initiators.
if (group.getInitiators().containsAll(initiatorUris)) {
_logger.info(String.format("Returning existing ExportGroup %s from database.", group.getLabel()));
return group;
}
}
}
// No existing group has the mask, let's create one.
exportGroup = new ExportGroup();
exportGroup.setLabel(groupName);
exportGroup.setProject(new NamedURI(projectURI, exportGroup.getLabel()));
// NOTE(review): this value is overwritten by setVirtualArray(virtualArrayURI)
// below, so the vplex-derived assignment appears redundant — confirm.
exportGroup.setVirtualArray(vplex.getVirtualArray());
exportGroup.setTenant(new NamedURI(tenantURI, exportGroup.getLabel()));
exportGroup.setGeneratedName(groupName);
exportGroup.setVolumes(new StringMap());
exportGroup.setOpStatus(new OpStatusMap());
exportGroup.setVirtualArray(virtualArrayURI);
exportGroup.setNumPaths(numPaths);
// Add the initiators into the ExportGroup.
for (Initiator initiator : initiators) {
exportGroup.addInitiator(initiator);
}
_logger.info(String.format("Returning new ExportGroup %s", exportGroup.getLabel()));
}
// Returned group is not yet persisted; callers are responsible for saving it.
return exportGroup;
}
Aggregations